hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
008ff9e0f8cc480955e3942ca0be1cbe2c614e6a.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "unit_test.h"
#include "amg_config.h"
#include "test_utils.h"
#include "cutil.h"
#include "util.h"
#include "amg_solver.h"
#include "resources.h"
#include "aggregation/coarseAgenerators/coarse_A_generator.h"
#include "aggregation/selectors/agg_selector.h"
#include "matrix_coloring/matrix_coloring.h"
#include "matrix_coloring/min_max.h"
#include "solvers/solver.h"
#include "classical/selectors/selector.h"
#include "classical/interpolators/interpolator.h"
#include "classical/strength/strength.h"
#include <cusp/print.h>
#include <cusp/gallery/poisson.h>
#ifdef AMGX_WITH_MPI
#include <mpi.h>
#endif
namespace amgx
{
// This test tries to run amgx stuff on the matrix with some offdiagonal values equals to zero (but stored in the A.values array explicitly)
DECLARE_UNITTEST_BEGIN(ExplicitZeroValues);
typedef typename TConfig_h::template setVecPrec<(AMGX_VecPrecision)AMGX_GET_MODE_VAL(AMGX_MatPrecision, TConfig::mode)>::Type vvec_h;
typedef typename TConfig::template setVecPrec<AMGX_vecInt>::Type ivec;
typedef typename TConfig_h::template setVecPrec<AMGX_vecInt>::Type ivec_h;
// setup restriction on HOST
void fillRowOffsetsAndColIndices(const int num_aggregates,
Vector<ivec_h> aggregates,
const int R_num_cols,
Vector<ivec_h> &R_row_offsets,
Vector<ivec_h> &R_col_indices)
{
for (int i = 0; i < num_aggregates + 1; i++)
{
R_row_offsets[i] = 0;
}
// Count number of neighbors for each row
for (int i = 0; i < R_num_cols; i++)
{
int I = aggregates[i];
R_row_offsets[I]++;
}
R_row_offsets[num_aggregates] = R_num_cols;
for (int i = num_aggregates - 1; i >= 0; i--)
{
R_row_offsets[i] = R_row_offsets[i + 1] - R_row_offsets[i];
}
/* Set column indices. */
for (int i = 0; i < R_num_cols; i++)
{
int I = aggregates[i];
int Ip = R_row_offsets[I]++;
R_col_indices[Ip] = i;
}
/* Reset r[i] to start of row memory. */
for (int i = num_aggregates - 1; i > 0; i--)
{
R_row_offsets[i] = R_row_offsets[i - 1];
}
R_row_offsets[0] = 0;
}
void test_coarsers(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
Matrix<T_Config> Ac;
int num_aggregates = A.get_num_rows();
Vector<ivec_h> h_aggregates;
h_aggregates.resize( A.get_num_rows() );
for ( int i = 0; i < h_aggregates.size(); i++ )
{
h_aggregates[i] = i;
}
Vector<ivec_h> h_R_row_offsets;
Vector<ivec_h> h_R_col_indices;
h_R_row_offsets.resize( num_aggregates + 1 );
h_R_col_indices.resize( A.get_num_rows() );
fillRowOffsetsAndColIndices( num_aggregates, h_aggregates, A.get_num_rows(), h_R_row_offsets, h_R_col_indices );
Vector<ivec> aggregates = h_aggregates;
Vector<ivec> R_row_offsets = h_R_row_offsets;
Vector<ivec> R_col_indices = h_R_col_indices;
cudaCheckError();
typename aggregation::CoarseAGeneratorFactory<T_Config>::Iterator iter = aggregation::CoarseAGeneratorFactory<T_Config>::getIterator();
aggregation::CoarseAGenerator<TConfig> *generator;
while (!aggregation::CoarseAGeneratorFactory<T_Config>::isIteratorLast(iter))
{
//std::cout << "aggregator=" << iter->first << std::endl;
generator = NULL;
generator = iter->second->create(cfg, cfg_scope);
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_TRUE_DESC("Generator is not created\n", generator != NULL);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
generator->computeAOperator(A, Ac, aggregates, R_row_offsets, R_col_indices, num_aggregates);
UNITTEST_ASSERT_TRUE_DESC("Coarser matrix contains nans\n", !containsNan<ValueTypeA>(Ac.values.raw(), Ac.values.size()));
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (generator != NULL) { delete generator; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_selectors(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
typename aggregation::SelectorFactory<T_Config>::Iterator iter = aggregation::SelectorFactory<T_Config>::getIterator();
aggregation::Selector<TConfig> *selector;
IVector vec, vec1;
int num;
while (!aggregation::SelectorFactory<T_Config>::isIteratorLast(iter))
{
string m_name = iter->first.c_str();
//printf("Trying selector %s\n", m_name.c_str());fflush(stdout);
if ((m_name.compare("GEO") == 0) || (m_name.compare("GEO_ONE_PHASE_HANDSHAKING") == 0) || (m_name.compare("PARALLEL_GREEDY_SELECTOR") == 0))
{
//printf("Skipping...\n");fflush(stdout);
++iter;
continue;
}
//std::cout << "selector=" << iter->first << std::endl;
selector = NULL;
PrintOnFail("processing: %s\n", iter->first.c_str());
selector = iter->second->create(cfg, cfg_scope);
PrintOnFail("Selector creation\n");
UNITTEST_ASSERT_TRUE(selector != NULL);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
selector->setAggregates(A, vec, vec1, num);
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (selector != NULL) { delete selector; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_matrix_coloring(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
MatrixColoring<TConfig> *color;
typename MatrixColoringFactory<T_Config>::Iterator iter = MatrixColoringFactory<T_Config>::getIterator();
while (!MatrixColoringFactory<T_Config>::isIteratorLast(iter))
{
//std::cout << "coloring=" << iter->first << std::endl;
color = NULL;
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
color = iter->second->create(cfg, cfg_scope);
UNITTEST_ASSERT_TRUE(color != NULL);
A.set_initialized(0);
A.colorMatrix(cfg, cfg_scope);
A.set_initialized(1);
int num_colors = A.getMatrixColoring().getNumColors();
UNITTEST_ASSERT_TRUE(num_colors != 0);
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (color != NULL) { delete color; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
template<class TConfig>
bool check_solver_mode_pair(string solver)
{
// skip IDR solvers because they don't handle diag zeros well
return ((solver != "FIXCOLOR_GS") &&
(solver != "KACZMARZ") &&
(solver != "IDR") &&
(solver != "IDRMSYNC"));
}
void test_solvers(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
#ifdef AMGX_WITH_MPI
int mpiFlag;
MPI_Initialized(&mpiFlag);
if ( !mpiFlag )
{
int argc = 1;
char **argv = NULL;
MPI_Init( &argc, &argv);
}
#endif
Vector<T_Config> b (A.get_num_rows()*A.get_block_dimy()), x (A.get_num_rows()*A.get_block_dimy());
cusp::blas::fill(b, 1);
b.set_block_dimx(1);
b.set_block_dimy(A.get_block_dimy());
x.set_block_dimx(1);
x.set_block_dimy(A.get_block_dimx());
Vector_h hx;
Solver<TConfig> *solver;
typename SolverFactory<T_Config>::Iterator iter = SolverFactory<T_Config>::getIterator();
while (!SolverFactory<T_Config>::isIteratorLast(iter))
{
//std::cout << "solver=" << iter->first << std::endl;
solver = NULL;
thrust::fill(x.begin(), x.end(), static_cast<ValueTypeB>(1.0));
//printf("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));fflush(stdout);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
solver = iter->second->create(cfg, cfg_scope);
// its known that jacobi_l1 implementation for 4x4 fails because of block inverse in setup
// its known that fixcolor_gs fails on solve phase because of bad values during setup
if (solver != NULL && check_solver_mode_pair<TConfig>(iter->first))
{
solver->setup(A, false);
solver->set_max_iters(1);
if (TConfig::matPrec != AMGX_matFloat)
{
solver->solve(b, x, false);
}
hx = x;
hipDeviceSynchronize();
cudaCheckError();
// NaNs are expected since there are zero elements
// UNITTEST_ASSERT_TRUE_DESC("Smoother result contains nans\n", !containsNan<ValueTypeB>(x.raw(), x.size()));
}
// std::cout << iter->first << std::endl;
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (solver != NULL) { delete solver; solver = NULL; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void generatePoissonForTest(Matrix<TConfig > &Aout, int block_size, bool diag_prop, int points, int x, int y, int z = 1)
{
Matrix<TConfig_h > Ac;
{
Matrix<TConfig_h > A;
A.set_initialized(0);
A.addProps(CSR);
MatrixCusp<TConfig_h, cusp::csr_format> wA(&A);
switch (points)
{
case 5:
cusp::gallery::poisson5pt(wA, x, y);
break;
case 7:
cusp::gallery::poisson7pt(wA, x, y, z);
break;
case 9:
cusp::gallery::poisson9pt(wA, x, y);
break;
case 27:
cusp::gallery::poisson27pt(wA, x, y, z);
break;
}
A.set_initialized(1);
Ac.convert( A, ( diag_prop ? DIAG : 0 ) | CSR, block_size, block_size );
Ac.set_initialized(1);
}
Aout = Ac;
}
void test_levels(Resources *res, Matrix<T_Config> &A)
{
Vector<T_Config> b (A.get_num_rows()*A.get_block_dimy()), x (A.get_num_rows()*A.get_block_dimy());
cusp::blas::fill(b, 1);
cusp::blas::fill(x, 1);
int bsize = A.get_block_dimy();
b.set_block_dimx(1);
b.set_block_dimy(bsize);
x.set_block_dimy(1);
x.set_block_dimx(bsize);
AMGX_STATUS solve_status;
if (!bsize > 1)
// Classical path will only work with block size 1, error handling below is not working for some reason
{
AMG_Configuration cfg;
AMGX_ERROR err = AMGX_OK;
UNITTEST_ASSERT_TRUE( cfg.parseParameterString("config_version=2, algorithm=CLASSICAL, smoother=MULTICOLOR_DILU, presweeps=1, postsweeps=1, matrix_coloring_scheme=MIN_MAX, determinism_flag=1, max_levels=2, max_iters=1, norm=L1, coloring_level=1") == AMGX_OK);
AMG_Solver<TConfig> amg(res, cfg);
err = amg.setup(A);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED)
{
PrintOnFail("Classical algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
err = amg.solve( b, x, solve_status, true);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET &&
err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE &&
err != AMGX_ERR_NOT_IMPLEMENTED)
{
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
PrintOnFail("Classical algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n",
A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_TRUE_DESC("Level solve result contains nans\n",
!containsNan<ValueTypeB>(x.raw(), x.size()));
}
}
}
cusp::blas::fill(x, 1);
{
AMG_Configuration cfg;
AMGX_ERROR err = AMGX_OK;
UNITTEST_ASSERT_TRUE( cfg.parseParameterString("config_version=2, algorithm=AGGREGATION, smoother=MULTICOLOR_DILU, presweeps=1, postsweeps=1, selector=SIZE_2, coarseAgenerator=LOW_DEG, matrix_coloring_scheme=MIN_MAX, determinism_flag=1, max_levels=2, max_iters=1, norm=L1, coloring_level=1") == AMGX_OK);
AMG_Solver<TConfig> amg(res, cfg);
err = amg.setup(A);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED)
{
PrintOnFail("Aggregation algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
err = amg.solve( b, x, solve_status, true);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED)
{
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
PrintOnFail("Aggregation algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_TRUE_DESC("Level solve result contains nans\n", !containsNan<ValueTypeB>(x.raw(), x.size()));
}
}
}
}
void test_strength(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> **good )
{
//allocate necessary memory
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector;
FVector weights(A.get_num_rows(), 0.0);
BVector s_con(A.get_num_nz(), false);
IVector cf_map(A.get_num_rows(), 0);
IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows
//compute strong connections and weights
double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope);
Strength<T_Config> *strength;
typename StrengthFactory<T_Config>::Iterator iter = StrengthFactory<T_Config>::getIterator();
while (!StrengthFactory<T_Config>::isIteratorLast(iter))
{
strength = NULL;
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
strength = iter->second->create(cfg, cfg_scope);
UNITTEST_ASSERT_TRUE(strength != NULL);
if (strength != NULL)
{
strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum);
UNITTEST_ASSERT_TRUE_DESC("Strength result contains nans\n", !containsNan<float>(weights.raw(), weights.size()));
*good = iter->second;
}
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (strength != NULL) { delete strength; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_selectors(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> *strengthf, classical::SelectorFactory<TConfig> **good )
{
//allocate necessary memory
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector;
FVector weights(A.get_num_rows(), 0.0);
BVector s_con(A.get_num_nz(), false);
IVector cf_map(A.get_num_rows(), 0);
IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows
//compute strong connections and weights
double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope);
Strength<T_Config> *strength = strengthf->create(cfg, cfg_scope);
strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum);
classical::Selector<T_Config> *selector;
typename classical::SelectorFactory<T_Config>::Iterator iter = classical::SelectorFactory<T_Config>::getIterator();
while (!classical::SelectorFactory<T_Config>::isIteratorLast(iter))
{
string m_name = iter->first.c_str();
if ((m_name.compare("GEO") == 0) || (m_name.compare("GEO_ONE_PHASE_HANDSHAKING") == 0))
{
++iter;
continue;
}
selector = NULL;
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
selector = iter->second->create();
UNITTEST_ASSERT_TRUE(strength != NULL);
if (selector != NULL)
{
selector->markCoarseFinePoints(A, weights, s_con, cf_map, scratch);
for (int i = 0; i < A.get_num_rows(); i++)
{
UNITTEST_ASSERT_TRUE(cf_map[i] != UNASSIGNED);
}
*good = iter->second;
}
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (selector != NULL) { delete selector; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_interpolators(Resources *res, Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> *strengthf, classical::SelectorFactory<TConfig> *selectorf )
{
//allocate necessary memory
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector;
Matrix<TConfig> P;
FVector weights(A.get_num_rows(), 0.0);
BVector s_con(A.get_num_nz(), false);
IVector cf_map(A.get_num_rows(), 0);
IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows
//compute strong connections and weights
double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope);
Strength<T_Config> *strength = strengthf->create(cfg, cfg_scope);
classical::Selector<T_Config> *selector = selectorf->create();
strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum);
selector->markCoarseFinePoints(A, weights, s_con, cf_map, scratch);
Interpolator<T_Config> *interpolator;
typename InterpolatorFactory<T_Config>::Iterator iter = InterpolatorFactory<T_Config>::getIterator();
AMG_Configuration scfg;
AMG_Solver<TConfig> amg(res, scfg);
while (!InterpolatorFactory<T_Config>::isIteratorLast(iter))
{
interpolator = NULL;
//printf("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));fflush(stdout);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
interpolator = iter->second->create(cfg, cfg_scope);
UNITTEST_ASSERT_TRUE(strength != NULL);
if (interpolator != NULL)
{
interpolator->generateInterpolationMatrix(A, cf_map, s_con, scratch, P, &amg);
}
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (interpolator != NULL) { delete interpolator; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
// for a few rows replaces their first values with zeros. avoids diagonal values.
void random_add_zeros(Matrix<TConfig> &A, int max_zeros)
{
int zero_num = max_zeros;
while (zero_num)
{
int rowidx = (int)( ((float)rand() / RAND_MAX) * (A.get_num_rows() - 1) );
if (rowidx == A.col_indices[A.row_offsets[rowidx]]) { continue; }
int validx = A.row_offsets[rowidx];
thrust::fill(A.values.begin() + validx, A.values.begin() + validx + A.get_block_size(), static_cast<ValueTypeA>(0.0));
--zero_num;
}
}
void run()
{
randomize( 31 );
set_forge_ahead(true);
int nrows = 100;
for (int bsize = 1; bsize < 3; ++bsize)
{
AMG_Config cfg;
cfg.parseParameterString("config_version=2, determinism_flag=1, coloring_level=1, reorder_cols_by_color=1, insert_diag_while_reordering=1, preconditioner=BLOCK_JACOBI, min_coarse_rows=2");
const std::string &cfg_scope = "default";
Resources res; // default resources
for (int diagProp = 0; diagProp < 2; diagProp++)
{
//std::cout << "bsize=" << bsize << " diag=" << diagProp << std::endl;
MatrixA A;
VVector tb;
generateMatrixRandomStruct<TConfig>::generateExact(A, nrows, (diagProp != 0), bsize, false);
random_fill(A);
random_add_zeros(A, 1);
//////////////////////////
// MatrixIO<TConfig>::writeSystemMatrixMarket("test.mtx", &A, &tb, &tb);
// aggregation
test_coarsers(A, cfg, cfg_scope);
test_selectors(A, cfg, cfg_scope);
test_matrix_coloring(A, cfg, cfg_scope);
// Some solvers need a coloring. Make one.
A.set_initialized(0);
A.colorMatrix(cfg, cfg_scope);
A.set_initialized(1);
test_solvers(A, cfg, cfg_scope);
// classical
//TODO: if strength cannot process matrix
if (bsize == 1)
{
StrengthFactory<TConfig> *good_strength = NULL;
test_strength(A, cfg, cfg_scope, &good_strength);
if (good_strength != NULL)
{
classical::SelectorFactory<TConfig> *good_selector = NULL;
test_selectors(A, cfg, cfg_scope, good_strength, &good_selector);
if (good_selector != NULL)
{
//test_interpolators(&res, A, cfg, cfg_scope, good_strength, good_selector );
}
}
}
// levels
test_levels(&res, A);
}
}
}
DECLARE_UNITTEST_END(ExplicitZeroValues);
#define AMGX_CASE_LINE(CASE) ExplicitZeroValues <TemplateMode<CASE>::Type> ExplicitZeroValues_##CASE;
AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
//ExplicitZeroValues <TemplateMode<AMGX_mode_dDDI>::Type> ExplicitZeroValues_dDDI;
} //namespace amgx
| 008ff9e0f8cc480955e3942ca0be1cbe2c614e6a.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "unit_test.h"
#include "amg_config.h"
#include "test_utils.h"
#include "cutil.h"
#include "util.h"
#include "amg_solver.h"
#include "resources.h"
#include "aggregation/coarseAgenerators/coarse_A_generator.h"
#include "aggregation/selectors/agg_selector.h"
#include "matrix_coloring/matrix_coloring.h"
#include "matrix_coloring/min_max.h"
#include "solvers/solver.h"
#include "classical/selectors/selector.h"
#include "classical/interpolators/interpolator.h"
#include "classical/strength/strength.h"
#include <cusp/print.h>
#include <cusp/gallery/poisson.h>
#ifdef AMGX_WITH_MPI
#include <mpi.h>
#endif
namespace amgx
{
// This test tries to run amgx stuff on the matrix with some offdiagonal values equals to zero (but stored in the A.values array explicitly)
DECLARE_UNITTEST_BEGIN(ExplicitZeroValues);
typedef typename TConfig_h::template setVecPrec<(AMGX_VecPrecision)AMGX_GET_MODE_VAL(AMGX_MatPrecision, TConfig::mode)>::Type vvec_h;
typedef typename TConfig::template setVecPrec<AMGX_vecInt>::Type ivec;
typedef typename TConfig_h::template setVecPrec<AMGX_vecInt>::Type ivec_h;
// setup restriction on HOST
void fillRowOffsetsAndColIndices(const int num_aggregates,
Vector<ivec_h> aggregates,
const int R_num_cols,
Vector<ivec_h> &R_row_offsets,
Vector<ivec_h> &R_col_indices)
{
for (int i = 0; i < num_aggregates + 1; i++)
{
R_row_offsets[i] = 0;
}
// Count number of neighbors for each row
for (int i = 0; i < R_num_cols; i++)
{
int I = aggregates[i];
R_row_offsets[I]++;
}
R_row_offsets[num_aggregates] = R_num_cols;
for (int i = num_aggregates - 1; i >= 0; i--)
{
R_row_offsets[i] = R_row_offsets[i + 1] - R_row_offsets[i];
}
/* Set column indices. */
for (int i = 0; i < R_num_cols; i++)
{
int I = aggregates[i];
int Ip = R_row_offsets[I]++;
R_col_indices[Ip] = i;
}
/* Reset r[i] to start of row memory. */
for (int i = num_aggregates - 1; i > 0; i--)
{
R_row_offsets[i] = R_row_offsets[i - 1];
}
R_row_offsets[0] = 0;
}
void test_coarsers(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
Matrix<T_Config> Ac;
int num_aggregates = A.get_num_rows();
Vector<ivec_h> h_aggregates;
h_aggregates.resize( A.get_num_rows() );
for ( int i = 0; i < h_aggregates.size(); i++ )
{
h_aggregates[i] = i;
}
Vector<ivec_h> h_R_row_offsets;
Vector<ivec_h> h_R_col_indices;
h_R_row_offsets.resize( num_aggregates + 1 );
h_R_col_indices.resize( A.get_num_rows() );
fillRowOffsetsAndColIndices( num_aggregates, h_aggregates, A.get_num_rows(), h_R_row_offsets, h_R_col_indices );
Vector<ivec> aggregates = h_aggregates;
Vector<ivec> R_row_offsets = h_R_row_offsets;
Vector<ivec> R_col_indices = h_R_col_indices;
cudaCheckError();
typename aggregation::CoarseAGeneratorFactory<T_Config>::Iterator iter = aggregation::CoarseAGeneratorFactory<T_Config>::getIterator();
aggregation::CoarseAGenerator<TConfig> *generator;
while (!aggregation::CoarseAGeneratorFactory<T_Config>::isIteratorLast(iter))
{
//std::cout << "aggregator=" << iter->first << std::endl;
generator = NULL;
generator = iter->second->create(cfg, cfg_scope);
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_TRUE_DESC("Generator is not created\n", generator != NULL);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
generator->computeAOperator(A, Ac, aggregates, R_row_offsets, R_col_indices, num_aggregates);
UNITTEST_ASSERT_TRUE_DESC("Coarser matrix contains nans\n", !containsNan<ValueTypeA>(Ac.values.raw(), Ac.values.size()));
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (generator != NULL) { delete generator; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_selectors(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
typename aggregation::SelectorFactory<T_Config>::Iterator iter = aggregation::SelectorFactory<T_Config>::getIterator();
aggregation::Selector<TConfig> *selector;
IVector vec, vec1;
int num;
while (!aggregation::SelectorFactory<T_Config>::isIteratorLast(iter))
{
string m_name = iter->first.c_str();
//printf("Trying selector %s\n", m_name.c_str());fflush(stdout);
if ((m_name.compare("GEO") == 0) || (m_name.compare("GEO_ONE_PHASE_HANDSHAKING") == 0) || (m_name.compare("PARALLEL_GREEDY_SELECTOR") == 0))
{
//printf("Skipping...\n");fflush(stdout);
++iter;
continue;
}
//std::cout << "selector=" << iter->first << std::endl;
selector = NULL;
PrintOnFail("processing: %s\n", iter->first.c_str());
selector = iter->second->create(cfg, cfg_scope);
PrintOnFail("Selector creation\n");
UNITTEST_ASSERT_TRUE(selector != NULL);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
selector->setAggregates(A, vec, vec1, num);
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (selector != NULL) { delete selector; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_matrix_coloring(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
MatrixColoring<TConfig> *color;
typename MatrixColoringFactory<T_Config>::Iterator iter = MatrixColoringFactory<T_Config>::getIterator();
while (!MatrixColoringFactory<T_Config>::isIteratorLast(iter))
{
//std::cout << "coloring=" << iter->first << std::endl;
color = NULL;
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
color = iter->second->create(cfg, cfg_scope);
UNITTEST_ASSERT_TRUE(color != NULL);
A.set_initialized(0);
A.colorMatrix(cfg, cfg_scope);
A.set_initialized(1);
int num_colors = A.getMatrixColoring().getNumColors();
UNITTEST_ASSERT_TRUE(num_colors != 0);
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (color != NULL) { delete color; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
template<class TConfig>
bool check_solver_mode_pair(string solver)
{
// skip IDR solvers because they don't handle diag zeros well
return ((solver != "FIXCOLOR_GS") &&
(solver != "KACZMARZ") &&
(solver != "IDR") &&
(solver != "IDRMSYNC"));
}
void test_solvers(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope)
{
#ifdef AMGX_WITH_MPI
int mpiFlag;
MPI_Initialized(&mpiFlag);
if ( !mpiFlag )
{
int argc = 1;
char **argv = NULL;
MPI_Init( &argc, &argv);
}
#endif
Vector<T_Config> b (A.get_num_rows()*A.get_block_dimy()), x (A.get_num_rows()*A.get_block_dimy());
cusp::blas::fill(b, 1);
b.set_block_dimx(1);
b.set_block_dimy(A.get_block_dimy());
x.set_block_dimx(1);
x.set_block_dimy(A.get_block_dimx());
Vector_h hx;
Solver<TConfig> *solver;
typename SolverFactory<T_Config>::Iterator iter = SolverFactory<T_Config>::getIterator();
while (!SolverFactory<T_Config>::isIteratorLast(iter))
{
//std::cout << "solver=" << iter->first << std::endl;
solver = NULL;
thrust::fill(x.begin(), x.end(), static_cast<ValueTypeB>(1.0));
//printf("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));fflush(stdout);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
solver = iter->second->create(cfg, cfg_scope);
// its known that jacobi_l1 implementation for 4x4 fails because of block inverse in setup
// its known that fixcolor_gs fails on solve phase because of bad values during setup
if (solver != NULL && check_solver_mode_pair<TConfig>(iter->first))
{
solver->setup(A, false);
solver->set_max_iters(1);
if (TConfig::matPrec != AMGX_matFloat)
{
solver->solve(b, x, false);
}
hx = x;
cudaDeviceSynchronize();
cudaCheckError();
// NaNs are expected since there are zero elements
// UNITTEST_ASSERT_TRUE_DESC("Smoother result contains nans\n", !containsNan<ValueTypeB>(x.raw(), x.size()));
}
// std::cout << iter->first << std::endl;
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (solver != NULL) { delete solver; solver = NULL; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void generatePoissonForTest(Matrix<TConfig > &Aout, int block_size, bool diag_prop, int points, int x, int y, int z = 1)
{
Matrix<TConfig_h > Ac;
{
Matrix<TConfig_h > A;
A.set_initialized(0);
A.addProps(CSR);
MatrixCusp<TConfig_h, cusp::csr_format> wA(&A);
switch (points)
{
case 5:
cusp::gallery::poisson5pt(wA, x, y);
break;
case 7:
cusp::gallery::poisson7pt(wA, x, y, z);
break;
case 9:
cusp::gallery::poisson9pt(wA, x, y);
break;
case 27:
cusp::gallery::poisson27pt(wA, x, y, z);
break;
}
A.set_initialized(1);
Ac.convert( A, ( diag_prop ? DIAG : 0 ) | CSR, block_size, block_size );
Ac.set_initialized(1);
}
Aout = Ac;
}
void test_levels(Resources *res, Matrix<T_Config> &A)
{
Vector<T_Config> b (A.get_num_rows()*A.get_block_dimy()), x (A.get_num_rows()*A.get_block_dimy());
cusp::blas::fill(b, 1);
cusp::blas::fill(x, 1);
int bsize = A.get_block_dimy();
b.set_block_dimx(1);
b.set_block_dimy(bsize);
x.set_block_dimy(1);
x.set_block_dimx(bsize);
AMGX_STATUS solve_status;
if (!bsize > 1)
// Classical path will only work with block size 1, error handling below is not working for some reason
{
AMG_Configuration cfg;
AMGX_ERROR err = AMGX_OK;
UNITTEST_ASSERT_TRUE( cfg.parseParameterString("config_version=2, algorithm=CLASSICAL, smoother=MULTICOLOR_DILU, presweeps=1, postsweeps=1, matrix_coloring_scheme=MIN_MAX, determinism_flag=1, max_levels=2, max_iters=1, norm=L1, coloring_level=1") == AMGX_OK);
AMG_Solver<TConfig> amg(res, cfg);
err = amg.setup(A);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED)
{
PrintOnFail("Classical algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
err = amg.solve( b, x, solve_status, true);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET &&
err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE &&
err != AMGX_ERR_NOT_IMPLEMENTED)
{
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
PrintOnFail("Classical algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n",
A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_TRUE_DESC("Level solve result contains nans\n",
!containsNan<ValueTypeB>(x.raw(), x.size()));
}
}
}
cusp::blas::fill(x, 1);
{
AMG_Configuration cfg;
AMGX_ERROR err = AMGX_OK;
UNITTEST_ASSERT_TRUE( cfg.parseParameterString("config_version=2, algorithm=AGGREGATION, smoother=MULTICOLOR_DILU, presweeps=1, postsweeps=1, selector=SIZE_2, coarseAgenerator=LOW_DEG, matrix_coloring_scheme=MIN_MAX, determinism_flag=1, max_levels=2, max_iters=1, norm=L1, coloring_level=1") == AMGX_OK);
AMG_Solver<TConfig> amg(res, cfg);
err = amg.setup(A);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED)
{
PrintOnFail("Aggregation algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
err = amg.solve( b, x, solve_status, true);
if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED)
{
UNITTEST_ASSERT_EQUAL(err, AMGX_OK);
PrintOnFail("Aggregation algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
UNITTEST_ASSERT_TRUE_DESC("Level solve result contains nans\n", !containsNan<ValueTypeB>(x.raw(), x.size()));
}
}
}
}
void test_strength(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> **good )
{
//allocate necessary memory
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector;
FVector weights(A.get_num_rows(), 0.0);
BVector s_con(A.get_num_nz(), false);
IVector cf_map(A.get_num_rows(), 0);
IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows
//compute strong connections and weights
double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope);
Strength<T_Config> *strength;
typename StrengthFactory<T_Config>::Iterator iter = StrengthFactory<T_Config>::getIterator();
while (!StrengthFactory<T_Config>::isIteratorLast(iter))
{
strength = NULL;
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
strength = iter->second->create(cfg, cfg_scope);
UNITTEST_ASSERT_TRUE(strength != NULL);
if (strength != NULL)
{
strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum);
UNITTEST_ASSERT_TRUE_DESC("Strength result contains nans\n", !containsNan<float>(weights.raw(), weights.size()));
*good = iter->second;
}
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (strength != NULL) { delete strength; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_selectors(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> *strengthf, classical::SelectorFactory<TConfig> **good )
{
//allocate necessary memory
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector;
FVector weights(A.get_num_rows(), 0.0);
BVector s_con(A.get_num_nz(), false);
IVector cf_map(A.get_num_rows(), 0);
IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows
//compute strong connections and weights
double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope);
Strength<T_Config> *strength = strengthf->create(cfg, cfg_scope);
strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum);
classical::Selector<T_Config> *selector;
typename classical::SelectorFactory<T_Config>::Iterator iter = classical::SelectorFactory<T_Config>::getIterator();
while (!classical::SelectorFactory<T_Config>::isIteratorLast(iter))
{
string m_name = iter->first.c_str();
if ((m_name.compare("GEO") == 0) || (m_name.compare("GEO_ONE_PHASE_HANDSHAKING") == 0))
{
++iter;
continue;
}
selector = NULL;
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
selector = iter->second->create();
UNITTEST_ASSERT_TRUE(strength != NULL);
if (selector != NULL)
{
selector->markCoarseFinePoints(A, weights, s_con, cf_map, scratch);
for (int i = 0; i < A.get_num_rows(); i++)
{
UNITTEST_ASSERT_TRUE(cf_map[i] != UNASSIGNED);
}
*good = iter->second;
}
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (selector != NULL) { delete selector; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
void test_interpolators(Resources *res, Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> *strengthf, classical::SelectorFactory<TConfig> *selectorf )
{
//allocate necessary memory
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector;
Matrix<TConfig> P;
FVector weights(A.get_num_rows(), 0.0);
BVector s_con(A.get_num_nz(), false);
IVector cf_map(A.get_num_rows(), 0);
IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows
//compute strong connections and weights
double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope);
Strength<T_Config> *strength = strengthf->create(cfg, cfg_scope);
classical::Selector<T_Config> *selector = selectorf->create();
strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum);
selector->markCoarseFinePoints(A, weights, s_con, cf_map, scratch);
Interpolator<T_Config> *interpolator;
typename InterpolatorFactory<T_Config>::Iterator iter = InterpolatorFactory<T_Config>::getIterator();
AMG_Configuration scfg;
AMG_Solver<TConfig> amg(res, scfg);
while (!InterpolatorFactory<T_Config>::isIteratorLast(iter))
{
interpolator = NULL;
//printf("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));fflush(stdout);
UNITTEST_ASSERT_EXCEPTION_START;
PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));
interpolator = iter->second->create(cfg, cfg_scope);
UNITTEST_ASSERT_TRUE(strength != NULL);
if (interpolator != NULL)
{
interpolator->generateInterpolationMatrix(A, cf_map, s_con, scratch, P, &amg);
}
UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED;
if (interpolator != NULL) { delete interpolator; }
++iter;
UNITTEST_ASSERT_TRUE(true);
}
}
// for a few rows replaces their first values with zeros. avoids diagonal values.
void random_add_zeros(Matrix<TConfig> &A, int max_zeros)
{
int zero_num = max_zeros;
while (zero_num)
{
int rowidx = (int)( ((float)rand() / RAND_MAX) * (A.get_num_rows() - 1) );
if (rowidx == A.col_indices[A.row_offsets[rowidx]]) { continue; }
int validx = A.row_offsets[rowidx];
thrust::fill(A.values.begin() + validx, A.values.begin() + validx + A.get_block_size(), static_cast<ValueTypeA>(0.0));
--zero_num;
}
}
void run()
{
randomize( 31 );
set_forge_ahead(true);
int nrows = 100;
for (int bsize = 1; bsize < 3; ++bsize)
{
AMG_Config cfg;
cfg.parseParameterString("config_version=2, determinism_flag=1, coloring_level=1, reorder_cols_by_color=1, insert_diag_while_reordering=1, preconditioner=BLOCK_JACOBI, min_coarse_rows=2");
const std::string &cfg_scope = "default";
Resources res; // default resources
for (int diagProp = 0; diagProp < 2; diagProp++)
{
//std::cout << "bsize=" << bsize << " diag=" << diagProp << std::endl;
MatrixA A;
VVector tb;
generateMatrixRandomStruct<TConfig>::generateExact(A, nrows, (diagProp != 0), bsize, false);
random_fill(A);
random_add_zeros(A, 1);
//////////////////////////
// MatrixIO<TConfig>::writeSystemMatrixMarket("test.mtx", &A, &tb, &tb);
// aggregation
test_coarsers(A, cfg, cfg_scope);
test_selectors(A, cfg, cfg_scope);
test_matrix_coloring(A, cfg, cfg_scope);
// Some solvers need a coloring. Make one.
A.set_initialized(0);
A.colorMatrix(cfg, cfg_scope);
A.set_initialized(1);
test_solvers(A, cfg, cfg_scope);
// classical
//TODO: if strength cannot process matrix
if (bsize == 1)
{
StrengthFactory<TConfig> *good_strength = NULL;
test_strength(A, cfg, cfg_scope, &good_strength);
if (good_strength != NULL)
{
classical::SelectorFactory<TConfig> *good_selector = NULL;
test_selectors(A, cfg, cfg_scope, good_strength, &good_selector);
if (good_selector != NULL)
{
//test_interpolators(&res, A, cfg, cfg_scope, good_strength, good_selector );
}
}
}
// levels
test_levels(&res, A);
}
}
}
DECLARE_UNITTEST_END(ExplicitZeroValues);
#define AMGX_CASE_LINE(CASE) ExplicitZeroValues <TemplateMode<CASE>::Type> ExplicitZeroValues_##CASE;
AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
//ExplicitZeroValues <TemplateMode<AMGX_mode_dDDI>::Type> ExplicitZeroValues_dDDI;
} //namespace amgx
|
10bb05ab97dd7ae1bddb1b81c545cfa8b25fef62.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM2DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM;
template hipError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, WCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const Scalar2 *d_vertices,
const unsigned int *d_num_shape_verts, const Scalar* d_diam,
const Scalar4 *d_velocity,
const unsigned int vertexCount, const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const WCADEM potential, const Scalar r_cutsq,
const unsigned int n_shapes,
const unsigned int particlesPerBlock, const unsigned int maxVerts);
| 10bb05ab97dd7ae1bddb1b81c545cfa8b25fef62.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM2DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM;
template cudaError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, WCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const Scalar2 *d_vertices,
const unsigned int *d_num_shape_verts, const Scalar* d_diam,
const Scalar4 *d_velocity,
const unsigned int vertexCount, const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const WCADEM potential, const Scalar r_cutsq,
const unsigned int n_shapes,
const unsigned int particlesPerBlock, const unsigned int maxVerts);
|
13ecc5badf1d69d63a5d2b27c928f2fb079ef869.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <hip/hip_runtime.h>
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "oneflow/core/cuda/elementwise.cuh"
namespace oneflow {
namespace {
constexpr int32_t kWarpSize = 32;
template<typename T, typename IndexType, int pack_size, bool tail>
__global__ void VectorizedReluDropoutBitmaskBackwardKernel(
const IndexType elem_cnt, const IndexType cols, const IndexType aux_ld, const float scale,
const IndexType n_tail, const IndexType tail_offset, const T* dy, const int32_t* mask, T* dx) {
int32_t global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
using LoadStoreType = cuda::elementwise::PackType<T, pack_size>;
using LoadStorePack = cuda::elementwise::Pack<T, pack_size>;
T t_scale = static_cast<T>(scale);
for (IndexType linear_pack_index = global_thread_id * pack_size; linear_pack_index < elem_cnt;
linear_pack_index += gridDim.x * blockDim.x * pack_size) {
const LoadStoreType* dy_load = reinterpret_cast<const LoadStoreType*>(dy + linear_pack_index);
LoadStorePack dy_vec;
dy_vec.storage = *dy_load;
LoadStorePack dx_vec;
#pragma unroll
for (int i = 0; i < pack_size; i++) {
const IndexType linear_index = (linear_pack_index + i);
const IndexType row = linear_index / cols;
const IndexType col = linear_index - row * cols;
const int32_t col_mod_warpsize = col % kWarpSize;
const IndexType aux_idx = ((row * aux_ld) + col) / kWarpSize;
bool is_positive = mask[aux_idx] & (1 << col_mod_warpsize);
dx_vec.elem[i] =
dy_vec.elem[i] * static_cast<T>(static_cast<float>(is_positive)) * static_cast<T>(scale);
}
*(reinterpret_cast<LoadStoreType*>(dx + linear_pack_index)) = dx_vec.storage;
}
if (tail && global_thread_id < n_tail) {
const IndexType tail_index = tail_offset + global_thread_id;
const IndexType tail_row = tail_index / cols;
const IndexType tail_col = tail_index - tail_row * cols;
const IndexType tail_col_mod_warpsize = tail_col % kWarpSize;
const IndexType tail_aux_idx = ((tail_row * aux_ld) + tail_col) / kWarpSize;
bool is_positive = mask[tail_aux_idx] & (1 << tail_col_mod_warpsize);
dx[tail_index] =
dy[tail_index] * static_cast<T>(static_cast<float>(is_positive)) * static_cast<T>(scale);
}
}
template<typename T>
void LaunchVectorizedReluDropoutBackwardKernel(ep::Stream* stream, const int64_t elem_cnt,
const int64_t cols, const int64_t aux_ld,
float scale, const T* dy, const int32_t* mask,
T* dx) {
constexpr int pack_size = cuda::elementwise::PackSize<T>();
const int64_t pack_num = elem_cnt / pack_size;
const int64_t tail_offset = pack_num * pack_size;
const int64_t n_tail = elem_cnt - tail_offset;
const bool tail = n_tail > 0 ? true : false;
if (tail) {
if (elem_cnt < GetMaxVal<int32_t>()) {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int32_t, pack_size, true>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, n_tail, tail_offset, dy,
mask, dx);
} else {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int64_t, pack_size, true>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, n_tail, tail_offset, dy,
mask, dx);
}
} else {
if (elem_cnt < GetMaxVal<int32_t>()) {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int32_t, pack_size, false>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, /*n_tail=*/0, tail_offset,
dy, mask, dx);
} else {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int64_t, pack_size, false>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, /*n_tail=*/0, tail_offset,
dy, mask, dx);
}
}
}
template<typename T>
class FusedReluDropoutGradKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
FusedReluDropoutGradKernel() = default;
~FusedReluDropoutGradKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float scale = ctx->Attr<float>("scale");
const int64_t cols = dy->shape_view().At(1);
const int64_t aux_ld = mask->shape_view().At(1) * 32;
const int64_t elem_cnt = dy->shape_view().elem_cnt();
LaunchVectorizedReluDropoutBackwardKernel<T>(
ctx->stream(), elem_cnt, cols, aux_ld, scale, reinterpret_cast<const T*>(dy->dptr()),
mask->dptr<int32_t>(), reinterpret_cast<T*>(dx->mut_dptr()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(cpp_type, data_type) \
REGISTER_USER_KERNEL("fused_relu_dropout_grad") \
.SetCreateFn<FusedReluDropoutGradKernel<cpp_type>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("dx", 0) == data_type));
REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(float, DataType::kFloat)
REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(half, DataType::kFloat16)
#if TORCH_HIP_VERSION >= 11000
REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(nv_bfloat16, DataType::kBFloat16)
#endif
} // namespace
} // namespace oneflow
| 13ecc5badf1d69d63a5d2b27c928f2fb079ef869.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <cuda.h>
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "oneflow/core/cuda/elementwise.cuh"
namespace oneflow {
namespace {
constexpr int32_t kWarpSize = 32;
template<typename T, typename IndexType, int pack_size, bool tail>
__global__ void VectorizedReluDropoutBitmaskBackwardKernel(
const IndexType elem_cnt, const IndexType cols, const IndexType aux_ld, const float scale,
const IndexType n_tail, const IndexType tail_offset, const T* dy, const int32_t* mask, T* dx) {
int32_t global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
using LoadStoreType = cuda::elementwise::PackType<T, pack_size>;
using LoadStorePack = cuda::elementwise::Pack<T, pack_size>;
T t_scale = static_cast<T>(scale);
for (IndexType linear_pack_index = global_thread_id * pack_size; linear_pack_index < elem_cnt;
linear_pack_index += gridDim.x * blockDim.x * pack_size) {
const LoadStoreType* dy_load = reinterpret_cast<const LoadStoreType*>(dy + linear_pack_index);
LoadStorePack dy_vec;
dy_vec.storage = *dy_load;
LoadStorePack dx_vec;
#pragma unroll
for (int i = 0; i < pack_size; i++) {
const IndexType linear_index = (linear_pack_index + i);
const IndexType row = linear_index / cols;
const IndexType col = linear_index - row * cols;
const int32_t col_mod_warpsize = col % kWarpSize;
const IndexType aux_idx = ((row * aux_ld) + col) / kWarpSize;
bool is_positive = mask[aux_idx] & (1 << col_mod_warpsize);
dx_vec.elem[i] =
dy_vec.elem[i] * static_cast<T>(static_cast<float>(is_positive)) * static_cast<T>(scale);
}
*(reinterpret_cast<LoadStoreType*>(dx + linear_pack_index)) = dx_vec.storage;
}
if (tail && global_thread_id < n_tail) {
const IndexType tail_index = tail_offset + global_thread_id;
const IndexType tail_row = tail_index / cols;
const IndexType tail_col = tail_index - tail_row * cols;
const IndexType tail_col_mod_warpsize = tail_col % kWarpSize;
const IndexType tail_aux_idx = ((tail_row * aux_ld) + tail_col) / kWarpSize;
bool is_positive = mask[tail_aux_idx] & (1 << tail_col_mod_warpsize);
dx[tail_index] =
dy[tail_index] * static_cast<T>(static_cast<float>(is_positive)) * static_cast<T>(scale);
}
}
template<typename T>
void LaunchVectorizedReluDropoutBackwardKernel(ep::Stream* stream, const int64_t elem_cnt,
const int64_t cols, const int64_t aux_ld,
float scale, const T* dy, const int32_t* mask,
T* dx) {
constexpr int pack_size = cuda::elementwise::PackSize<T>();
const int64_t pack_num = elem_cnt / pack_size;
const int64_t tail_offset = pack_num * pack_size;
const int64_t n_tail = elem_cnt - tail_offset;
const bool tail = n_tail > 0 ? true : false;
if (tail) {
if (elem_cnt < GetMaxVal<int32_t>()) {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int32_t, pack_size, true>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, n_tail, tail_offset, dy,
mask, dx);
} else {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int64_t, pack_size, true>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, n_tail, tail_offset, dy,
mask, dx);
}
} else {
if (elem_cnt < GetMaxVal<int32_t>()) {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int32_t, pack_size, false>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, /*n_tail=*/0, tail_offset,
dy, mask, dx);
} else {
stream->As<ep::CudaStream>()->LaunchKernelDefaultWaves(
(VectorizedReluDropoutBitmaskBackwardKernel<T, int64_t, pack_size, false>),
std::max<int64_t>(1, pack_num), elem_cnt, cols, aux_ld, scale, /*n_tail=*/0, tail_offset,
dy, mask, dx);
}
}
}
template<typename T>
class FusedReluDropoutGradKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
FusedReluDropoutGradKernel() = default;
~FusedReluDropoutGradKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float scale = ctx->Attr<float>("scale");
const int64_t cols = dy->shape_view().At(1);
const int64_t aux_ld = mask->shape_view().At(1) * 32;
const int64_t elem_cnt = dy->shape_view().elem_cnt();
LaunchVectorizedReluDropoutBackwardKernel<T>(
ctx->stream(), elem_cnt, cols, aux_ld, scale, reinterpret_cast<const T*>(dy->dptr()),
mask->dptr<int32_t>(), reinterpret_cast<T*>(dx->mut_dptr()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(cpp_type, data_type) \
REGISTER_USER_KERNEL("fused_relu_dropout_grad") \
.SetCreateFn<FusedReluDropoutGradKernel<cpp_type>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("dx", 0) == data_type));
REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(float, DataType::kFloat)
REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(half, DataType::kFloat16)
#if CUDA_VERSION >= 11000
REGISTER_FUSED_RELU_DROPOUT_GRAD_KERNEL_GPU(nv_bfloat16, DataType::kBFloat16)
#endif
} // namespace
} // namespace oneflow
|
47e940b2775a21310ab8541bbde8534958e945aa.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/TensorIterator.h>
#include <c10/util/TypeSafeSignMath.h>
#include <type_traits>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void remainder_kernel_cuda(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
if (r != 0 && c10::signs_differ(r, b)) {
r += b;
}
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
auto mod = ::fmod(a, b);
if (mod != 0 && c10::signs_differ(b, mod)) {
mod += b;
}
return mod;
});
});
}
}
void fmod_kernel_cuda(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a % b;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return ::fmod(a, b);
});
});
}
}
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);
REGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda);
} // namespace at::native
| 47e940b2775a21310ab8541bbde8534958e945aa.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/TensorIterator.h>
#include <c10/util/TypeSafeSignMath.h>
#include <type_traits>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void remainder_kernel_cuda(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
if (r != 0 && c10::signs_differ(r, b)) {
r += b;
}
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
auto mod = ::fmod(a, b);
if (mod != 0 && c10::signs_differ(b, mod)) {
mod += b;
}
return mod;
});
});
}
}
void fmod_kernel_cuda(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a % b;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return ::fmod(a, b);
});
});
}
}
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);
REGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda);
} // namespace at::native
|
0ff386322fe01031a6b7395c9e1b76f91cbb1c65.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
# error "A C or C++ compiler has been selected for CUDA"
#endif
#include "CMakeCompilerABI.h"
int main(int argc, char* argv[])
{
int require = 0;
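  /* Referencing the ABI info strings through argc keeps the compiler from discarding them,
     so they remain embedded in the binary for CMake to detect. */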
require += info_sizeof_dptr[argc];
#if defined(ABI_ID)
require += info_abi[argc];
#endif
(void)argv;
return require;
}
| 0ff386322fe01031a6b7395c9e1b76f91cbb1c65.cu | #ifndef __CUDACC__
# error "A C or C++ compiler has been selected for CUDA"
#endif
#include "CMakeCompilerABI.h"
int main(int argc, char* argv[])
{
int require = 0;
require += info_sizeof_dptr[argc];
#if defined(ABI_ID)
require += info_abi[argc];
#endif
(void)argv;
return require;
}
|
dbab510f76978fd6983f3bf2ee7b4919a2e65896.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define SIZE 32
#ifndef PINNED
#define PINNED 0
#endif
// Matrix-by-matrix product
// C(NxM) <- A(NxP) * B (PxM)
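// Shared-memory tiled implementation: each block computes one SIZE x SIZE tile of C,
// staging matching tiles of A and B in shared memory before accumulating.
// Assumes N, M and P are multiples of SIZE (no bounds checks are performed).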
__global__ void Kernel10(int N, int M, int P, float *A, float *B, float *C) {
__shared__ float sA[SIZE][SIZE];
__shared__ float sB[SIZE][SIZE];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by * SIZE + ty;
int col = bx * SIZE + tx;
float tmp = 0.0;
for (int m=0; m < P; m=m+SIZE) {
sA[ty][tx] = A[row*P + m + tx];
sB[ty][tx] = B[col + (m + ty)*M];
__syncthreads();
for (int k=0; k<SIZE; k++)
tmp += sA[ty][k] * sB[k][tx];
__syncthreads();
}
C[row*M+col] = tmp;
}
void InitM(int N, int M, float *Mat);
int TestMM(int N, int M, int P, float *A, float *B, float *C);
// Invocation:
// ./executable TAM test
// TAM is the dimension of the matrices
// test == 'Y', checks that the result is correct
// test == 'N', does NOT check the result (useful for timing runs)
// By default, N = 2048, test == 'N'
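// Example: ./exe 1024 Y multiplies two 1024x1024 matrices and verifies the result on the CPU.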
int main(int argc, char** argv)
{
unsigned int N;
unsigned int numBytes;
unsigned int nBlocks, nThreads;
float TiempoTotal, TiempoKernel;
hipEvent_t E0, E1, E2, E3;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
char test;
  // Dimension of the NxN matrices and whether to check the result
if (argc == 1) { test = 'N'; N = 2048; }
else if (argc == 2) { test = 'N'; N = atoi(argv[1]); }
else if (argc == 3) { test = *argv[2]; N = atoi(argv[1]); }
else { printf("Usage: ./exe TAM test\n"); exit(0); }
  // number of threads in each dimension
nThreads = SIZE;
  // number of blocks in each dimension
nBlocks = N/nThreads;
numBytes = N * N * sizeof(float);
dim3 dimGrid(nBlocks, nBlocks, 1);
dim3 dimBlock(nThreads, nThreads, 1);
hipEventCreate(&E0);
hipEventCreate(&E1);
hipEventCreate(&E2);
hipEventCreate(&E3);
if (PINNED) {
    // Allocate [pinned] memory on the host
hipHostMalloc((float**)&h_A, numBytes);
hipHostMalloc((float**)&h_B, numBytes);
hipHostMalloc((float**)&h_C, numBytes);
}
else {
    // Allocate memory on the host
h_A = (float*) malloc(numBytes);
h_B = (float*) malloc(numBytes);
h_C = (float*) malloc(numBytes);
}
  // Initialize the matrices
InitM(N, N, h_A);
InitM(N, N, h_B);
hipEventRecord(E0, 0);
hipEventSynchronize(E0);
  // Allocate memory on the device
hipMalloc((float**)&d_A, numBytes);
hipMalloc((float**)&d_B, numBytes);
hipMalloc((float**)&d_C, numBytes);
  // Copy data from the host to the device
hipMemcpy(d_A, h_A, numBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, numBytes, hipMemcpyHostToDevice);
hipEventRecord(E1, 0);
hipEventSynchronize(E1);
  // Launch the kernel
hipLaunchKernelGGL(( Kernel10), dim3(dimGrid), dim3(dimBlock), 0, 0, N, N, N, d_A, d_B, d_C);
hipEventRecord(E2, 0);
hipEventSynchronize(E2);
  // Copy the result back to the host
hipMemcpy(h_C, d_C, numBytes, hipMemcpyDeviceToHost);
  // Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipEventRecord(E3, 0);
hipEventSynchronize(E3);
hipEventElapsedTime(&TiempoTotal, E0, E3);
hipEventElapsedTime(&TiempoKernel, E1, E2);
printf("\nKERNEL 10\n");
printf("Dimensiones: %dx%d\n", N, N);
printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads);
printf("nBlocks: %dx%d (%d)\n", nBlocks, nBlocks, nBlocks*nBlocks);
if (PINNED) printf("Usando Pinned Memory\n");
else printf("NO usa Pinned Memory\n");
printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
printf("Rendimiento Global: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoTotal));
printf("Rendimiento Kernel: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoKernel));
hipEventDestroy(E0); hipEventDestroy(E1); hipEventDestroy(E2); hipEventDestroy(E3);
if (test == 'N')
printf ("NO TEST\n");
else if (TestMM(N, N, N, h_A, h_B, h_C))
printf ("TEST PASS\n");
else
printf ("TEST FAIL\n");
if (PINNED) {
hipHostFree(h_A); hipHostFree(h_B); hipHostFree(h_C);
}
else {
free(h_A); free(h_B); free(h_C);
}
}
void InitM(int N, int M, float *Mat) {
int i;
for (i=0; i<N*M; i++)
Mat[i] = rand() / (float) RAND_MAX;
}
int error(float a, float b) {
float tmp;
tmp = abs(a-b) / abs(min(a,b));
if (tmp > 0.0001) return 1;
else return 0;
}
int TestMM(int N, int M, int P, float *A, float *B, float *C) {
int i, j, k;
float tmp;
for (i=0; i<N; i++)
for (j=0; j<M; j++) {
tmp = 0.0;
for (k=0; k<P; k++)
tmp = tmp + A[i*P+k] * B[k*M+j];
if (error(tmp, C[i*M+j])) {
printf ("%d:%d: %f - %f = %f \n", i, j, tmp, C[i*M+j], abs(tmp - C[i*M+j]));
return 0;
}
}
return 1;
}
| dbab510f76978fd6983f3bf2ee7b4919a2e65896.cu | #include <stdio.h>
#include <stdlib.h>
#define SIZE 32
#ifndef PINNED
#define PINNED 0
#endif
// Matrix-by-matrix product
// C(NxM) <- A(NxP) * B (PxM)
__global__ void Kernel10(int N, int M, int P, float *A, float *B, float *C) {
__shared__ float sA[SIZE][SIZE];
__shared__ float sB[SIZE][SIZE];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by * SIZE + ty;
int col = bx * SIZE + tx;
float tmp = 0.0;
for (int m=0; m < P; m=m+SIZE) {
sA[ty][tx] = A[row*P + m + tx];
sB[ty][tx] = B[col + (m + ty)*M];
__syncthreads();
for (int k=0; k<SIZE; k++)
tmp += sA[ty][k] * sB[k][tx];
__syncthreads();
}
C[row*M+col] = tmp;
}
void InitM(int N, int M, float *Mat);
int TestMM(int N, int M, int P, float *A, float *B, float *C);
// Invocation:
// ./executable TAM test
// TAM is the dimension of the matrices
// test == 'Y', checks that the result is correct
// test == 'N', does NOT check the result (useful for timing runs)
// By default, N = 2048, test == 'N'
int main(int argc, char** argv)
{
unsigned int N;
unsigned int numBytes;
unsigned int nBlocks, nThreads;
float TiempoTotal, TiempoKernel;
cudaEvent_t E0, E1, E2, E3;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
char test;
  // Dimension of the NxN matrices and whether to check the result
if (argc == 1) { test = 'N'; N = 2048; }
else if (argc == 2) { test = 'N'; N = atoi(argv[1]); }
else if (argc == 3) { test = *argv[2]; N = atoi(argv[1]); }
else { printf("Usage: ./exe TAM test\n"); exit(0); }
  // number of threads in each dimension
nThreads = SIZE;
  // number of blocks in each dimension
nBlocks = N/nThreads;
numBytes = N * N * sizeof(float);
dim3 dimGrid(nBlocks, nBlocks, 1);
dim3 dimBlock(nThreads, nThreads, 1);
cudaEventCreate(&E0);
cudaEventCreate(&E1);
cudaEventCreate(&E2);
cudaEventCreate(&E3);
if (PINNED) {
    // Allocate [pinned] memory on the host
cudaMallocHost((float**)&h_A, numBytes);
cudaMallocHost((float**)&h_B, numBytes);
cudaMallocHost((float**)&h_C, numBytes);
}
else {
    // Allocate memory on the host
h_A = (float*) malloc(numBytes);
h_B = (float*) malloc(numBytes);
h_C = (float*) malloc(numBytes);
}
  // Initialize the matrices
InitM(N, N, h_A);
InitM(N, N, h_B);
cudaEventRecord(E0, 0);
cudaEventSynchronize(E0);
  // Allocate memory on the device
cudaMalloc((float**)&d_A, numBytes);
cudaMalloc((float**)&d_B, numBytes);
cudaMalloc((float**)&d_C, numBytes);
  // Copy data from the host to the device
cudaMemcpy(d_A, h_A, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, numBytes, cudaMemcpyHostToDevice);
cudaEventRecord(E1, 0);
cudaEventSynchronize(E1);
  // Launch the kernel
Kernel10<<<dimGrid, dimBlock>>>(N, N, N, d_A, d_B, d_C);
cudaEventRecord(E2, 0);
cudaEventSynchronize(E2);
  // Copy the result back to the host
cudaMemcpy(h_C, d_C, numBytes, cudaMemcpyDeviceToHost);
  // Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaEventRecord(E3, 0);
cudaEventSynchronize(E3);
cudaEventElapsedTime(&TiempoTotal, E0, E3);
cudaEventElapsedTime(&TiempoKernel, E1, E2);
printf("\nKERNEL 10\n");
printf("Dimensiones: %dx%d\n", N, N);
printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads);
printf("nBlocks: %dx%d (%d)\n", nBlocks, nBlocks, nBlocks*nBlocks);
if (PINNED) printf("Usando Pinned Memory\n");
else printf("NO usa Pinned Memory\n");
printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
printf("Rendimiento Global: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoTotal));
printf("Rendimiento Kernel: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoKernel));
cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3);
if (test == 'N')
printf ("NO TEST\n");
else if (TestMM(N, N, N, h_A, h_B, h_C))
printf ("TEST PASS\n");
else
printf ("TEST FAIL\n");
if (PINNED) {
cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_C);
}
else {
free(h_A); free(h_B); free(h_C);
}
}
void InitM(int N, int M, float *Mat) {
int i;
for (i=0; i<N*M; i++)
Mat[i] = rand() / (float) RAND_MAX;
}
int error(float a, float b) {
float tmp;
tmp = abs(a-b) / abs(min(a,b));
if (tmp > 0.0001) return 1;
else return 0;
}
int TestMM(int N, int M, int P, float *A, float *B, float *C) {
int i, j, k;
float tmp;
for (i=0; i<N; i++)
for (j=0; j<M; j++) {
tmp = 0.0;
for (k=0; k<P; k++)
tmp = tmp + A[i*P+k] * B[k*M+j];
if (error(tmp, C[i*M+j])) {
printf ("%d:%d: %f - %f = %f \n", i, j, tmp, C[i*M+j], abs(tmp - C[i*M+j]));
return 0;
}
}
return 1;
}
|
84d7b83a554f473e17f1840c044e1782b74e744b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Sample Program for CUDA 2.3
* written by M.Saito ([email protected])
*
 * This sample uses a texture reference.
 * The generation speed of the PRNG using textures is faster than using
 * a constant table on a GeForce GTX 260.
*
* MTGP32-11213
* This program generates 32-bit unsigned integers.
* The period of generated integers is 2<sup>23209</sup>-1.
* This also generates single precision floating point numbers.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <shoverand/util/myCutil.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <stdlib.h>
#include "mtgp-cuda-common.h"
void printParams(mtgp32_params_fast_t* params);
/*
int get_suitable_block_num(int word_size, int thread_num, int large_size) {
hipDeviceProp_t dev;
hipDevice_t cuDevice;
int max_thread_dev;
int max_block, max_block_mem, max_block_dev;
int major, minor, ver;
myCutilSafeCall(hipGetDeviceProperties(&dev, 0));
hipDeviceGet(&cuDevice, 0);
hipDeviceComputeCapability(&major, &minor, cuDevice);
max_block_mem = dev.sharedMemPerBlock / (large_size * word_size);
if (major == 9999 && minor == 9999) {
return -1;
}
ver = major * 100 + minor;
if (ver <= 101) {
max_thread_dev = 768;
} else if (ver <= 103) {
max_thread_dev = 1024;
} else {
max_thread_dev = 1024;
}
max_block_dev = max_thread_dev / thread_num;
if (max_block_mem < max_block_dev) {
max_block = max_block_mem;
} else {
max_block = max_block_dev;
}
return max_block * dev.multiProcessorCount;
}
*/
/**
* This function initializes kernel I/O data.
* @param d_status output kernel I/O data.
* @param params MTGP32 parameters. needed for the initialization.
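 * @param block_num number of thread blocks; one MTGP32 state is initialized per block, seeded with its block index + 1.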
*/
void make_kernel_data(mtgp32_kernel_status_t *d_status,
mtgp32_params_fast_t params[],
int block_num) {
mtgp32_kernel_status_t* h_status = (mtgp32_kernel_status_t *) malloc(block_num *
sizeof(mtgp32_kernel_status_t));
if (h_status == NULL) {
printf("failure in allocating host memory for kernel I/O data.\n");
exit(8);
}
for (int i = 0; i < block_num; i++) {
// printParams(¶ms[i]);
mtgp32_init_state(&(h_status[i].status[0]), ¶ms[i], i + 1);
}
#if defined(DEBUG)
printf("h_status[0].status[0]:%08x\n", h_status[0].status[0]);
printf("h_status[0].status[1]:%08x\n", h_status[0].status[1]);
printf("h_status[0].status[2]:%08x\n", h_status[0].status[2]);
printf("h_status[0].status[3]:%08x\n\n", h_status[0].status[3]);
printf("h_status[1].status[0]:%08x\n", h_status[1].status[0]);
printf("h_status[1].status[1]:%08x\n", h_status[1].status[1]);
printf("h_status[1].status[2]:%08x\n", h_status[1].status[2]);
printf("h_status[1].status[3]:%08x\n\n", h_status[1].status[3]);
printf("h_status[2].status[0]:%08x\n", h_status[2].status[0]);
printf("h_status[2].status[1]:%08x\n", h_status[2].status[1]);
printf("h_status[2].status[2]:%08x\n", h_status[2].status[2]);
printf("h_status[2].status[3]:%08x\n\n", h_status[2].status[3]);
#endif
myCutilSafeCall(hipMemcpy(d_status,
h_status,
sizeof(mtgp32_kernel_status_t) * block_num,
hipMemcpyHostToDevice));
free(h_status);
}
void printParams(mtgp32_params_fast_t* params) {
printf ("Printing parameters:\n \
mexp: %d\n\
pos: %d\n\
sh1: %d\n\
sh2: %d\n", params->mexp, params->pos, params->sh1, params->sh2);
for (int i = 0; i < 16; ++i) printf ("tbl[%d]= %u\n", i, params->tbl[i]);
for (int i = 0; i < 16; ++i) printf ("tmp_tbl[%d]= %u\n", i, params->tmp_tbl[i]);
for (int i = 0; i < 16; ++i) printf ("flt_tmp_tbl[%d]= %u\n", i, params->flt_tmp_tbl[i]);
printf("mask: %u\n", params->mask);
for (int i = 0; i < 21; ++i) printf ("poly_sha1[%d]= %d\n", i, params->poly_sha1[i]);
}
| 84d7b83a554f473e17f1840c044e1782b74e744b.cu | /*
* Sample Program for CUDA 2.3
* written by M.Saito ([email protected])
*
 * This sample uses a texture reference.
 * The generation speed of the PRNG using textures is faster than using
 * a constant table on a GeForce GTX 260.
*
* MTGP32-11213
* This program generates 32-bit unsigned integers.
* The period of generated integers is 2<sup>23209</sup>-1.
* This also generates single precision floating point numbers.
*/
#include <stdio.h>
#include <cuda.h>
#include <shoverand/util/myCutil.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <stdlib.h>
#include "mtgp-cuda-common.h"
void printParams(mtgp32_params_fast_t* params);
/*
int get_suitable_block_num(int word_size, int thread_num, int large_size) {
cudaDeviceProp dev;
CUdevice cuDevice;
int max_thread_dev;
int max_block, max_block_mem, max_block_dev;
int major, minor, ver;
myCutilSafeCall(cudaGetDeviceProperties(&dev, 0));
cuDeviceGet(&cuDevice, 0);
cuDeviceComputeCapability(&major, &minor, cuDevice);
max_block_mem = dev.sharedMemPerBlock / (large_size * word_size);
if (major == 9999 && minor == 9999) {
return -1;
}
ver = major * 100 + minor;
if (ver <= 101) {
max_thread_dev = 768;
} else if (ver <= 103) {
max_thread_dev = 1024;
} else {
max_thread_dev = 1024;
}
max_block_dev = max_thread_dev / thread_num;
if (max_block_mem < max_block_dev) {
max_block = max_block_mem;
} else {
max_block = max_block_dev;
}
return max_block * dev.multiProcessorCount;
}
*/
/**
* This function initializes kernel I/O data.
* @param d_status output kernel I/O data.
* @param params MTGP32 parameters. needed for the initialization.
*/
void make_kernel_data(mtgp32_kernel_status_t *d_status,
mtgp32_params_fast_t params[],
int block_num) {
mtgp32_kernel_status_t* h_status = (mtgp32_kernel_status_t *) malloc(block_num *
sizeof(mtgp32_kernel_status_t));
if (h_status == NULL) {
printf("failure in allocating host memory for kernel I/O data.\n");
exit(8);
}
for (int i = 0; i < block_num; i++) {
// printParams(¶ms[i]);
mtgp32_init_state(&(h_status[i].status[0]), ¶ms[i], i + 1);
}
#if defined(DEBUG)
printf("h_status[0].status[0]:%08x\n", h_status[0].status[0]);
printf("h_status[0].status[1]:%08x\n", h_status[0].status[1]);
printf("h_status[0].status[2]:%08x\n", h_status[0].status[2]);
printf("h_status[0].status[3]:%08x\n\n", h_status[0].status[3]);
printf("h_status[1].status[0]:%08x\n", h_status[1].status[0]);
printf("h_status[1].status[1]:%08x\n", h_status[1].status[1]);
printf("h_status[1].status[2]:%08x\n", h_status[1].status[2]);
printf("h_status[1].status[3]:%08x\n\n", h_status[1].status[3]);
printf("h_status[2].status[0]:%08x\n", h_status[2].status[0]);
printf("h_status[2].status[1]:%08x\n", h_status[2].status[1]);
printf("h_status[2].status[2]:%08x\n", h_status[2].status[2]);
printf("h_status[2].status[3]:%08x\n\n", h_status[2].status[3]);
#endif
myCutilSafeCall(cudaMemcpy(d_status,
h_status,
sizeof(mtgp32_kernel_status_t) * block_num,
cudaMemcpyHostToDevice));
free(h_status);
}
void printParams(mtgp32_params_fast_t* params) {
printf ("Printing parameters:\n \
mexp: %d\n\
pos: %d\n\
sh1: %d\n\
sh2: %d\n", params->mexp, params->pos, params->sh1, params->sh2);
for (int i = 0; i < 16; ++i) printf ("tbl[%d]= %u\n", i, params->tbl[i]);
for (int i = 0; i < 16; ++i) printf ("tmp_tbl[%d]= %u\n", i, params->tmp_tbl[i]);
for (int i = 0; i < 16; ++i) printf ("flt_tmp_tbl[%d]= %u\n", i, params->flt_tmp_tbl[i]);
printf("mask: %u\n", params->mask);
for (int i = 0; i < 21; ++i) printf ("poly_sha1[%d]= %d\n", i, params->poly_sha1[i]);
}
|
a21d8df423140d47672edcd7c118ad6e5016fe16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_runtime.h"
#include "vec_kernels.cuh"
#include "stddef.h"
#include <cmath>
__global__
void mat_transpose(double *X, double *Xt, size_t m, size_t n)
{
size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid >= m*n)
return;
size_t row = gid / n;
size_t col = gid % n;
Xt[col * m + row] = X[row * n + col];
}
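// Matrix-vector product: out = X * y for a row-major m x n matrix X; one thread per output row.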
__global__
void vec_dot_mat(double *X, double *y, double *out, size_t m, size_t n)
{
size_t row_idx = threadIdx.x + blockDim.x * blockIdx.x;
if (row_idx >= m)
return;
out[row_idx] = 0.0;
double accum = 0.0;
for (size_t i = 0; i < n; i++) {
accum += X[row_idx * n + i] * y[i];
}
out[row_idx] = accum;
}
__global__
void vec_add(double *a, double *b, double *out, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] + b[idx];
}
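// Strided dot product using a shared-memory tree reduction; each block writes its partial
// sum to out[blockIdx.x]. Launch with blockDim.x * sizeof(double) bytes of dynamic shared
// memory and a power-of-two block size.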
__global__
void vec_dot_product(double *a, double *b, double *out, size_t stride, size_t n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
temp[tid] = (idx < n) ? a[idx] * b[idx] : 0;
__syncthreads();
for (size_t shf = blockDim.x / 2; shf > 0; shf >>= 1) {
if (tid < shf) {
temp[tid] += temp[tid + shf];
}
__syncthreads();
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
__global__
void vec_scalar_mul(double *a, double *out, double c, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] * c;
}
__global__
void vec_sigmoid(double *a, double *out, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = 1 / (1 + exp(-a[idx]));
}
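// Per-element binary cross-entropy, -y*log(h) - (1-y)*log(1-h), reduced per block into
// out[blockIdx.x] using the same shared-memory reduction as vec_dot_product.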
__global__
void vec_logloss(double *h, double *y, double *out, size_t stride, size_t n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n) {
temp[tid] = -(y[idx]) * log(h[idx]) - (1 - (y[idx])) * log(1 - (h[idx]));
} else {
temp[tid] = 0;
}
__syncthreads();
for (size_t shf = blockDim.x / 2; shf > 0; shf >>= 1) {
if (tid < shf) {
temp[tid] += temp[tid + shf];
}
__syncthreads();
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
__global__
void vec_dot_asym(double *a, double *b, double *out, size_t a_stride, size_t b_stride, size_t a_n, size_t b_n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
    size_t a_idx = gid * a_stride;
    size_t b_idx = gid * b_stride;
if (a_idx < a_n && b_idx < b_n)
temp[tid] = a[a_idx] * b[b_idx];
else
temp[tid] = 0;
__syncthreads();
for (size_t shf = blockDim.x / 2; shf > 0; shf >>= 1) {
if (tid < shf)
temp[tid] += temp[tid + shf];
__syncthreads();
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
| a21d8df423140d47672edcd7c118ad6e5016fe16.cu | #include "cuda_runtime.h"
#include "vec_kernels.cuh"
#include "stddef.h"
#include <cmath>
__global__
void mat_transpose(double *X, double *Xt, size_t m, size_t n)
{
size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid >= m*n)
return;
size_t row = gid / n;
size_t col = gid % n;
Xt[col * m + row] = X[row * n + col];
}
__global__
void vec_dot_mat(double *X, double *y, double *out, size_t m, size_t n)
{
size_t row_idx = threadIdx.x + blockDim.x * blockIdx.x;
if (row_idx >= m)
return;
out[row_idx] = 0.0;
double accum = 0.0;
for (size_t i = 0; i < n; i++) {
accum += X[row_idx * n + i] * y[i];
}
out[row_idx] = accum;
}
__global__
void vec_add(double *a, double *b, double *out, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] + b[idx];
}
__global__
void vec_dot_product(double *a, double *b, double *out, size_t stride, size_t n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
temp[tid] = (idx < n) ? a[idx] * b[idx] : 0;
__syncthreads();
for (size_t shf = blockDim.x / 2; shf > 0; shf >>= 1) {
if (tid < shf) {
temp[tid] += temp[tid + shf];
}
__syncthreads();
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
__global__
void vec_scalar_mul(double *a, double *out, double c, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] * c;
}
__global__
void vec_sigmoid(double *a, double *out, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = 1 / (1 + exp(-a[idx]));
}
__global__
void vec_logloss(double *h, double *y, double *out, size_t stride, size_t n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n) {
temp[tid] = -(y[idx]) * log(h[idx]) - (1 - (y[idx])) * log(1 - (h[idx]));
} else {
temp[tid] = 0;
}
__syncthreads();
for (size_t shf = blockDim.x / 2; shf > 0; shf >>= 1) {
if (tid < shf) {
temp[tid] += temp[tid + shf];
}
__syncthreads();
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
__global__
void vec_dot_asym(double *a, double *b, double *out, size_t a_stride, size_t b_stride, size_t a_n, size_t b_n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
    size_t a_idx = gid * a_stride;
    size_t b_idx = gid * b_stride;
if (a_idx < a_n && b_idx < b_n)
temp[tid] = a[a_idx] * b[b_idx];
else
temp[tid] = 0;
__syncthreads();
for (size_t shf = blockDim.x / 2; shf > 0; shf >>= 1) {
if (tid < shf)
temp[tid] += temp[tid + shf];
__syncthreads();
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
|
3b058feaefa3e5d726e6e6ca8862d5f35ca668b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <addition.h>
#include <hip/hip_runtime.h>
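// Grid-stride loop: each thread processes elements index, index + stride, index + 2*stride, ...
// so the kernel handles any n regardless of the launch configuration.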
__global__
void add(int n, const float *x, const float *y, float* result)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
result[i] = x[i] + y[i];
}
void GpuAdd(const float* x, const float* y, float* result, const size_t N)
{
float* gpuX;
float* gpuY;
float* gpuResult;
hipMallocManaged(&gpuX, N * sizeof(float));
hipMallocManaged(&gpuY, N * sizeof(float));
hipMallocManaged(&gpuResult, N * sizeof(float));
	hipMemcpy(gpuX, x, N * sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(gpuY, y, N * sizeof(float), hipMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add << <numBlocks, blockSize >> > (N, gpuX, gpuY, gpuResult);
hipDeviceSynchronize();
	hipMemcpy(result, gpuResult, N * sizeof(float), hipMemcpyDeviceToHost);
hipFree(gpuX);
hipFree(gpuY);
hipFree(gpuResult);
} | 3b058feaefa3e5d726e6e6ca8862d5f35ca668b3.cu | #include <addition.h>
#include <cuda.h>
__global__
void add(int n, const float *x, const float *y, float* result)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
result[i] = x[i] + y[i];
}
void GpuAdd(const float* x, const float* y, float* result, const size_t N)
{
float* gpuX;
float* gpuY;
float* gpuResult;
cudaMallocManaged(&gpuX, N * sizeof(float));
cudaMallocManaged(&gpuY, N * sizeof(float));
cudaMallocManaged(&gpuResult, N * sizeof(float));
	cudaMemcpy(gpuX, x, N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(gpuY, y, N * sizeof(float), cudaMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add << <numBlocks, blockSize >> > (N, gpuX, gpuY, gpuResult);
cudaDeviceSynchronize();
	cudaMemcpy(result, gpuResult, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(gpuX);
cudaFree(gpuY);
cudaFree(gpuResult);
} |
a5e4de59f0b32c22b15f079b4a791e1833f74c8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "costVolume.cuh"
__host__ int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
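// Builds the disparity cost volume on the GPU: for every pixel and every disparity candidate
// d in [D_MIN, D_MAX], the cost blends a truncated absolute color difference with a truncated
// x-gradient difference, weighted by ALPHA (TH_color and TH_grad are the truncation thresholds).
// When host_gpu_compare is set, the volume is recomputed on the CPU and checked against the GPU result.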
void compute_cost(unsigned char* i1, unsigned char* i2, float* cost, int w1, int w2, int h1, int h2, int dmin, bool host_gpu_compare) {
int size_d = D_MAX - D_MIN + 1;
int size_cost = h1 * w1*size_d;
unsigned char* d_i1;
unsigned char* d_i2;
float* d_xder1;
float* d_xder2;
float* d_cost;
float* derivative1 = (float*)malloc(h1*w1 * sizeof(float));
float* derivative2 = (float*)malloc(h2*w2 * sizeof(float));
memset(derivative1, 0.0f, h1*w1 * sizeof(float));
memset(derivative2, 0.0f, h2*w2 * sizeof(float));
memset(cost, 0.0f, size_d*h2*w2 * sizeof(float));
CHECK(hipMalloc((unsigned char**)&d_i1, w1 * h1));
CHECK(hipMalloc((unsigned char**)&d_i2, w2 * h2));
CHECK(hipMalloc((void**)&d_xder1, w1 * h1 * sizeof(float)));
CHECK(hipMalloc((void**)&d_xder2, w2 * h2 * sizeof(float)));
CHECK(hipMalloc((void**)&d_cost, size_cost * sizeof(float)));
CHECK(hipMemcpy(d_i1, i1, w1 * h1, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_i2, i2, w2 * h2, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_xder1, derivative1, w1 * h1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_xder2, derivative2, w2 * h2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_cost, cost, sizeof(float)*size_cost, hipMemcpyHostToDevice));
dim3 blockDim1(1024);
dim3 gridDim1((h1*w1 + blockDim1.x - 1) / blockDim1.x);
dim3 blockDim2(1024);
dim3 gridDim2((h2*w2 + blockDim2.x - 1) / blockDim2.x);
x_derivativeOnGPU << <gridDim1, blockDim1 >> > (d_i1, d_xder1, w1, h1);
x_derivativeOnGPU << <gridDim2, blockDim2 >> > (d_i2, d_xder2, w2, h2);
CHECK(hipMemcpy(derivative1, d_xder1, h1*w1 * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(derivative2, d_xder2, h2*w2 * sizeof(float), hipMemcpyDeviceToHost));
dim3 blockDim(16, size_d);
dim3 gridDim;
//gridDim.x = (w1*h1 + blockDim.x - 1)/blockDim.x;
gridDim.x = iDivUp(w1*h1, blockDim.x);
gridDim.y = 1;//size_d;
costVolumOnGPU2 << <gridDim, blockDim >> > (d_i1, d_i2, d_cost, d_xder1, d_xder2, w1, w2, h1, h2, size_d, dmin);
CHECK(hipDeviceSynchronize());
// check kernel error
CHECK(hipGetLastError());
CHECK(hipMemcpy(cost, d_cost, size_cost * sizeof(float), hipMemcpyDeviceToHost));
//host side
if (host_gpu_compare) {
float* h_cost = (float*)malloc(size_cost * sizeof(float));
float* h_derivative1 = (float*)malloc(h1*w1* sizeof(float));
float* h_derivative2 = (float*)malloc(h2*w2 * sizeof(float));
memset(h_cost, 0, sizeof(float)*(size_cost));
memset(h_derivative1, 0, sizeof(float)*(w1*h1));
memset(h_derivative2, 0, sizeof(float)*(w2*h2));
x_derivativeOnCpu(i1, h_derivative1, w1, h1);
x_derivativeOnCpu(i2, h_derivative2, w2, h2);
compute_costVolumeOnCpu(i1, i2, h_cost, h_derivative1, h_derivative2, w1, w2, h1, h2, size_d, dmin);
bool verif = check_errors(h_cost, cost, size_cost);
if (verif) cout << "Cost Volume ok!" << endl;
free(h_cost);
free(h_derivative1);
free(h_derivative2);
}
// free device global memory
CHECK(hipFree(d_cost));
CHECK(hipFree(d_i1));
CHECK(hipFree(d_i2));
CHECK(hipFree(d_xder1));
CHECK(hipFree(d_xder2));
free(derivative1);
free(derivative2);
}
/**
void disparity_selection(float* filtered_cost, float* best_cost, float* disparity_map, const int w, const int h, const int dmin,bool host_gpu_compare) {
const int size_d = D_MAX - D_MIN + 1;
const int n = w * h;
int n_fl = n * sizeof(float);
float* d_filtered_cost;
float* d_best_cost;
float* d_dmap;
CHECK(hipMalloc((void**)&d_best_cost, n_fl));
CHECK(hipMalloc((void**)&d_filtered_cost, size_d*n_fl));
CHECK(hipMalloc((void**)&d_dmap, n_fl));
CHECK(hipMemcpy(d_filtered_cost, filtered_cost, size_d*n_fl, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_dmap, disparity_map, n_fl, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_best_cost, best_cost, n_fl, hipMemcpyHostToDevice));
dim3 blockDim(1024);
dim3 gridDim((n +blockDim.x -1)/blockDim.x);
//gridDim.x = (w1*h1 + blockDim.x - 1)/blockDim.x;
selectionOnGpu<< <gridDim, blockDim >> > (d_filtered_cost, d_best_cost, d_dmap, n, size_d, dmin);
CHECK(hipDeviceSynchronize());
// check kernel error
CHECK(hipGetLastError());
CHECK(hipMemcpy(best_cost, d_best_cost, n_fl, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(disparity_map, d_dmap, n_fl, hipMemcpyDeviceToHost));
// free device global memory
CHECK(hipFree(d_best_cost));
CHECK(hipFree(d_dmap));
CHECK(hipFree(d_filtered_cost));
}
__global__ void selectionOnGpu(float* filt_cost, float* best_cost, float* dmap, const int n, const int dsize, const int dmin) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int offset = n;
if (i < n) {
for (int j = 0; j < dsize; j++) {
if (1.0f*best_cost[i] > 1.0f*filt_cost[i + j * n]) {
best_cost[i] = filt_cost[i + j * n];
dmap[i] = dmin + j;
}
}
}
}
**/
void costVolumeOnCPU(unsigned char* i1, unsigned char* i2, float* cost, int w1, int w2, int h1, int h2, int size_d, int dmin) {
float alpha = 1.0f*ALPHA;
float th_color = 1.0f*TH_color;
float th_grad = 1.0f*TH_grad;
for (int z = 0; z < size_d; z++) {
for (int y = 0; y < h1; y++) {
for (int x = 0; x < w1; x++) {
int index = y * w1 + x;
int id = z * w1*h1 + index;
float c = (1.0f - alpha) * th_color + alpha *1.0f*th_grad;
int d = dmin + z;
if ((x + d < w2) && (x + d >= 0)) {
float diff_term = 1.0f*abs(i1[index] - i2[index + d]);
float grad_1 = x_derivativeCPU(i1, x, index, w1);
float grad_2 = x_derivativeCPU(i2, x + d, index + d, w2);
float grad_term = abs(grad_1 - grad_2);
c = (1 - alpha)*min(diff_term, th_color) + alpha * min(grad_term, th_grad);
}
cost[id] = c;
}
}
}
}
__global__ void costVolumOnGPU2(unsigned char* i1, unsigned char* i2, float* cost, float* derivative1,float* derivative2,int w1, int w2, int h1, int h2, int size_d, int dmin) {
// x threads for pixels [0, w*h]
int x = blockDim.x*blockIdx.x + threadIdx.x;
// y threads for d [0, size_d]
int y = blockDim.y*blockIdx.y + threadIdx.y;
float alpha = 1.0f*ALPHA;
float th_color = 1.0f*TH_color;
float th_grad = 1.0f*TH_grad;
// row index in the image
int idx = x % w1;
// col index in the image
int idy = x / w1;
// index [0, w*h*size_d]
int id = y * w1*h1 + x;
// d candidate [dmin, dmax]
int d = dmin + y;
if (y < size_d && x < w1*h1) {
// threshold
float c = (1 - alpha) * th_color + 1.0f*alpha * th_grad;
if (((idx + d) < w2) && ((idx + d) >= 0))
{
c = (1.0f - alpha)*min(1.0f*(abs((int)i1[x] - (int)i2[x + d])), th_color) + alpha * min(1.0f*(abs(derivative1[x] - derivative2[x + d])), th_grad);
}
cost[id] = c;
//printf("%f\n", c);
//float* q;
//// TODO filter
//q[id] = 0;
//__syncthreads();
//// disparity selection - blockDim should be SIZE_1D !!!
//// fill with 0
//__shared__ float bestDisparity[SIZE_1D];
//// fill with 100000
//__shared__ float bestCost[SIZE_1D];
//bestDisparity[threadIdx.x] = 0;
//bestCost[threadIdx.x] = 0;
//__syncthreads();
//if (q[id] < bestCost[threadIdx.x])
//{
// bestCost[threadIdx.x] = q[id];
// bestDisparity[threadIdx.x] = d;
//}
//__syncthreads();
//// output to add in param - size w*h - fill with 0
//float* disparityMap;
//disparityMap[x] = bestDisparity[threadIdx.x];
}
//extern __shared__ float temp[];
//// for shared memory
//int tdx = threadIdx.x;
//// to cumSum one row - for w = 1080, we need 540 threads
//int idx = blockIdx.x * blockDim.x + threadIdx.x;
//// for each row
//int idy = blockIdx.y * blockDim.y + threadIdx.y;
//int idxEven = idx * 2 + idy * w;
//int idxOdd = idx * 2 + 1 + idy * w;
//int offset = 1;
//temp[2 * tdx] = input[idxEven];
//temp[2 * tdx + 1] = input[idxOdd];
//for (int nSum = B_SIZE / 2; nSum > 0; nSum /= 2)
//{
// __syncthreads();
// if (tdx < nSum)
// {
// int a = offset * (2 * tdx + 1) - 1;
// int b = offset * (2 * tdx + 2) - 1;
// temp[b] += temp[a];
// }
// offset *= 2;
//}
//__syncthreads();
////Write output (size h)
//output[2 * tdx] = temp[2 * tdx];
//output[2 * tdx + 1] = temp[2 * tdx + 1];
}
__device__ int id_im(int i, int j, int width) {
return j * width + i;
}
__device__ int id_cost(int i, int j, int width, int height, int k) {
return k * width*height + j * width + i;
}
__device__ float x_derivative(unsigned char* im, int col_index, int index, int width) {
if ((col_index + 1) < width && (col_index - 1) >= 0)
{
return ((float)(im[index + 1] - im[index - 1]) / 2);
}
else if (col_index + 1 == width)
{
return ((float)(im[index] - im[index - 1]) / 2);
}
else if (col_index - 1 == -1)
{
return ((float)(im[index + 1] - im[index]) / 2);
}
}
__host__ float x_derivativeCPU(unsigned char* im, int col_index, int index, int width) {
if ((col_index + 1) < width && (col_index - 1) >= 0)
{
return ((float)(im[index + 1] - im[index - 1]) / 2);
}
else if (col_index + 1 >= width)
{
return ((float)(im[index] - im[index - 1]) / 2);
}
else
{
return ((float)(im[index + 1] - im[index]) / 2);
}
}
__device__ int difference_term(unsigned char pixel_i, unsigned char pixel_j) {
return min(abs((int)(pixel_i - pixel_j)), TH_color);
}
__device__ float difference_term_2(float pixel_i, float pixel_j) {
return min(abs(pixel_i - pixel_j), 1.0f*TH_grad);
}
__device__ int getGlobalIdx_1D_2D()
{
return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__host__ void compute_costVolumeOnCpu(unsigned char* i1, unsigned char* i2, float* cost, float* derivative1, float* derivative2, int w1, int w2, int h1, int h2, int size_d, int dmin) {
float alpha = 1.0f*ALPHA;
float th_color = 1.0f*TH_color;
float th_grad = 1.0f*TH_grad;
for (int k = 0; k < size_d; k++) {
for (int j = 0; j < h1; j++) {
for (int i = 0; i < w1; i++) {
int index = j * w1 + i;
int id = k * w1*h1 + index;
float c = (1.0f - alpha) * th_color + alpha * (1.0f*th_grad);
int d = dmin + k;
if ((i + d < w2) && (i + d >= 0)) {
float diff_term = 1.0f*abs(i1[index] - i2[index + d]);
float grad_1 = 1.0f*derivative1[index];
float grad_2 = 1.0f*derivative2[index + d];
float grad_term = 1.0f*abs(grad_1 - grad_2);
c = (1.0f - alpha)*min(diff_term, th_color) + alpha * min(grad_term, th_grad);
}
cost[id] = c;
}
}
}
}
__host__ void x_derivativeOnCpu(unsigned char* in, float* out, int w, int h) {
for (int j = 0; j < h; j++) {
for (int i = 0; i < w; i++) {
int id = j * w + i;
int c1 = 0;
int c2 = 0;
if (i - 1 >= 0 && i + 1 < w) {
c1 = (int)(in[id + 1]);
c2 = (int)(in[id - 1]);
}
else if (i + 1 >= w) {
c1 = (int)(in[id]);
c2 = (int)(in[id -1]);
}
else{
c1 = (int)(in[id+1]);
c2 = (int)(in[id]);
}
out[id] = 1.0f*(c2 - c1) / 2;
}
}
}
__global__ void x_derivativeOnGPU(unsigned char* in, float* out, int w, int h) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
int i = id % w;
if (id < w * h) {
float c1 = 0;
float c2 = 0;
if (i - 1 >= 0 && i + 1 < w) {
c1 = (int)(in[id + 1]);
c2 = (int)(in[id - 1]);
}
else if (i + 1 >= w) {
c1 = (int)(in[id]);
c2 = (int)(in[id - 1]);
}
else if (i -1 <= -1){
c1 = (int)(in[id + 1]);
c2 = (int)(in[id]);
}
out[id] = 1.0f*(c2 - c1) / 2;
}
}
/**
__global__ void x_derivativeOnGPU(unsigned char* in, unsigned char* out, const int w, const int h) {
int tdx = threadIdx.x;
int id = blockIdx.x*blockDim.y + threadIdx.y;
int idx = id % w;
int idy = id / w;
__shared__ float s_f[TILE_WIDTH][3];
for (int i = 0; i < TILE_WIDTH; i++) {
if ((idx - 1) >= 0 && idx + 1 < w) {
s_f[i][0] = in[idx - 1];
s_f[i][1] = in[idx];
s_f[i][2] = in[idx + 1];
}
else if (idx + 1 == w) {
s_f[i][0] = in[idx - 1];
s_f[i][1] = in[idx];
s_f[i][2] = in[idx];
}
else if (idx - 1 == 0) {
s_f[i][0] = in[idx];
s_f[i][1] = in[idx];
s_f[i][2] = in[idx + 1];
}
__syncthreads();
s_f[i][1] = (s_f[i][2] - s_f[i][0]) / 2;
out[id] = s_f[i][1];
}
}
**/ | a5e4de59f0b32c22b15f079b4a791e1833f74c8f.cu | #include "costVolume.cuh"
__host__ int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
void compute_cost(unsigned char* i1, unsigned char* i2, float* cost, int w1, int w2, int h1, int h2, int dmin, bool host_gpu_compare) {
int size_d = D_MAX - D_MIN + 1;
int size_cost = h1 * w1*size_d;
unsigned char* d_i1;
unsigned char* d_i2;
float* d_xder1;
float* d_xder2;
float* d_cost;
float* derivative1 = (float*)malloc(h1*w1 * sizeof(float));
float* derivative2 = (float*)malloc(h2*w2 * sizeof(float));
memset(derivative1, 0.0f, h1*w1 * sizeof(float));
memset(derivative2, 0.0f, h2*w2 * sizeof(float));
memset(cost, 0.0f, size_d*h2*w2 * sizeof(float));
CHECK(cudaMalloc((unsigned char**)&d_i1, w1 * h1));
CHECK(cudaMalloc((unsigned char**)&d_i2, w2 * h2));
CHECK(cudaMalloc((void**)&d_xder1, w1 * h1 * sizeof(float)));
CHECK(cudaMalloc((void**)&d_xder2, w2 * h2 * sizeof(float)));
CHECK(cudaMalloc((void**)&d_cost, size_cost * sizeof(float)));
CHECK(cudaMemcpy(d_i1, i1, w1 * h1, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_i2, i2, w2 * h2, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_xder1, derivative1, w1 * h1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_xder2, derivative2, w2 * h2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_cost, cost, sizeof(float)*size_cost, cudaMemcpyHostToDevice));
dim3 blockDim1(1024);
dim3 gridDim1((h1*w1 + blockDim1.x - 1) / blockDim1.x);
dim3 blockDim2(1024);
dim3 gridDim2((h2*w2 + blockDim2.x - 1) / blockDim2.x);
x_derivativeOnGPU << <gridDim1, blockDim1 >> > (d_i1, d_xder1, w1, h1);
x_derivativeOnGPU << <gridDim2, blockDim2 >> > (d_i2, d_xder2, w2, h2);
CHECK(cudaMemcpy(derivative1, d_xder1, h1*w1 * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(derivative2, d_xder2, h2*w2 * sizeof(float), cudaMemcpyDeviceToHost));
dim3 blockDim(16, size_d);
dim3 gridDim;
//gridDim.x = (w1*h1 + blockDim.x - 1)/blockDim.x;
gridDim.x = iDivUp(w1*h1, blockDim.x);
gridDim.y = 1;//size_d;
costVolumOnGPU2 << <gridDim, blockDim >> > (d_i1, d_i2, d_cost, d_xder1, d_xder2, w1, w2, h1, h2, size_d, dmin);
CHECK(cudaDeviceSynchronize());
// check kernel error
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(cost, d_cost, size_cost * sizeof(float), cudaMemcpyDeviceToHost));
//host side
if (host_gpu_compare) {
float* h_cost = (float*)malloc(size_cost * sizeof(float));
float* h_derivative1 = (float*)malloc(h1*w1* sizeof(float));
float* h_derivative2 = (float*)malloc(h2*w2 * sizeof(float));
memset(h_cost, 0, sizeof(float)*(size_cost));
memset(h_derivative1, 0, sizeof(float)*(w1*h1));
memset(h_derivative2, 0, sizeof(float)*(w2*h2));
x_derivativeOnCpu(i1, h_derivative1, w1, h1);
x_derivativeOnCpu(i2, h_derivative2, w2, h2);
compute_costVolumeOnCpu(i1, i2, h_cost, h_derivative1, h_derivative2, w1, w2, h1, h2, size_d, dmin);
bool verif = check_errors(h_cost, cost, size_cost);
if (verif) cout << "Cost Volume ok!" << endl;
free(h_cost);
free(h_derivative1);
free(h_derivative2);
}
// free device global memory
CHECK(cudaFree(d_cost));
CHECK(cudaFree(d_i1));
CHECK(cudaFree(d_i2));
CHECK(cudaFree(d_xder1));
CHECK(cudaFree(d_xder2));
free(derivative1);
free(derivative2);
}
/**
void disparity_selection(float* filtered_cost, float* best_cost, float* disparity_map, const int w, const int h, const int dmin,bool host_gpu_compare) {
const int size_d = D_MAX - D_MIN + 1;
const int n = w * h;
int n_fl = n * sizeof(float);
float* d_filtered_cost;
float* d_best_cost;
float* d_dmap;
CHECK(cudaMalloc((void**)&d_best_cost, n_fl));
CHECK(cudaMalloc((void**)&d_filtered_cost, size_d*n_fl));
CHECK(cudaMalloc((void**)&d_dmap, n_fl));
CHECK(cudaMemcpy(d_filtered_cost, filtered_cost, size_d*n_fl, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_dmap, disparity_map, n_fl, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_best_cost, best_cost, n_fl, cudaMemcpyHostToDevice));
dim3 blockDim(1024);
dim3 gridDim((n +blockDim.x -1)/blockDim.x);
//gridDim.x = (w1*h1 + blockDim.x - 1)/blockDim.x;
selectionOnGpu<< <gridDim, blockDim >> > (d_filtered_cost, d_best_cost, d_dmap, n, size_d, dmin);
CHECK(cudaDeviceSynchronize());
// check kernel error
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(best_cost, d_best_cost, n_fl, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(disparity_map, d_dmap, n_fl, cudaMemcpyDeviceToHost));
// free device global memory
CHECK(cudaFree(d_best_cost));
CHECK(cudaFree(d_dmap));
CHECK(cudaFree(d_filtered_cost));
}
__global__ void selectionOnGpu(float* filt_cost, float* best_cost, float* dmap, const int n, const int dsize, const int dmin) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int offset = n;
if (i < n) {
for (int j = 0; j < dsize; j++) {
if (1.0f*best_cost[i] > 1.0f*filt_cost[i + j * n]) {
best_cost[i] = filt_cost[i + j * n];
dmap[i] = dmin + j;
}
}
}
}
**/
void costVolumeOnCPU(unsigned char* i1, unsigned char* i2, float* cost, int w1, int w2, int h1, int h2, int size_d, int dmin) {
float alpha = 1.0f*ALPHA;
float th_color = 1.0f*TH_color;
float th_grad = 1.0f*TH_grad;
for (int z = 0; z < size_d; z++) {
for (int y = 0; y < h1; y++) {
for (int x = 0; x < w1; x++) {
int index = y * w1 + x;
int id = z * w1*h1 + index;
float c = (1.0f - alpha) * th_color + alpha *1.0f*th_grad;
int d = dmin + z;
if ((x + d < w2) && (x + d >= 0)) {
float diff_term = 1.0f*abs(i1[index] - i2[index + d]);
float grad_1 = x_derivativeCPU(i1, x, index, w1);
float grad_2 = x_derivativeCPU(i2, x + d, index + d, w2);
float grad_term = abs(grad_1 - grad_2);
c = (1 - alpha)*min(diff_term, th_color) + alpha * min(grad_term, th_grad);
}
cost[id] = c;
}
}
}
}
__global__ void costVolumOnGPU2(unsigned char* i1, unsigned char* i2, float* cost, float* derivative1,float* derivative2,int w1, int w2, int h1, int h2, int size_d, int dmin) {
// x threads for pixels [0, w*h]
int x = blockDim.x*blockIdx.x + threadIdx.x;
// y threads for d [0, size_d]
int y = blockDim.y*blockIdx.y + threadIdx.y;
float alpha = 1.0f*ALPHA;
float th_color = 1.0f*TH_color;
float th_grad = 1.0f*TH_grad;
// row index in the image
int idx = x % w1;
// col index in the image
int idy = x / w1;
// index [0, w*h*size_d]
int id = y * w1*h1 + x;
// d candidate [dmin, dmax]
int d = dmin + y;
if (y < size_d && x < w1*h1) {
// threshold
float c = (1 - alpha) * th_color + 1.0f*alpha * th_grad;
if (((idx + d) < w2) && ((idx + d) >= 0))
{
c = (1.0f - alpha)*min(1.0f*(abs((int)i1[x] - (int)i2[x + d])), th_color) + alpha * min(1.0f*(abs(derivative1[x] - derivative2[x + d])), th_grad);
}
cost[id] = c;
//printf("%f\n", c);
//float* q;
//// TODO filter
//q[id] = 0;
//__syncthreads();
//// disparity selection - blockDim should be SIZE_1D !!!
//// fill with 0
//__shared__ float bestDisparity[SIZE_1D];
//// fill with 100000
//__shared__ float bestCost[SIZE_1D];
//bestDisparity[threadIdx.x] = 0;
//bestCost[threadIdx.x] = 0;
//__syncthreads();
//if (q[id] < bestCost[threadIdx.x])
//{
// bestCost[threadIdx.x] = q[id];
// bestDisparity[threadIdx.x] = d;
//}
//__syncthreads();
//// output to add in param - size w*h - fill with 0
//float* disparityMap;
//disparityMap[x] = bestDisparity[threadIdx.x];
}
//extern __shared__ float temp[];
//// for shared memory
//int tdx = threadIdx.x;
//// to cumSum one row - for w = 1080, we need 540 threads
//int idx = blockIdx.x * blockDim.x + threadIdx.x;
//// for each row
//int idy = blockIdx.y * blockDim.y + threadIdx.y;
//int idxEven = idx * 2 + idy * w;
//int idxOdd = idx * 2 + 1 + idy * w;
//int offset = 1;
//temp[2 * tdx] = input[idxEven];
//temp[2 * tdx + 1] = input[idxOdd];
//for (int nSum = B_SIZE / 2; nSum > 0; nSum /= 2)
//{
// __syncthreads();
// if (tdx < nSum)
// {
// int a = offset * (2 * tdx + 1) - 1;
// int b = offset * (2 * tdx + 2) - 1;
// temp[b] += temp[a];
// }
// offset *= 2;
//}
//__syncthreads();
////Write output (size h)
//output[2 * tdx] = temp[2 * tdx];
//output[2 * tdx + 1] = temp[2 * tdx + 1];
}
__device__ int id_im(int i, int j, int width) {
return j * width + i;
}
__device__ int id_cost(int i, int j, int width, int height, int k) {
return k * width*height + j * width + i;
}
__device__ float x_derivative(unsigned char* im, int col_index, int index, int width) {
if ((col_index + 1) < width && (col_index - 1) >= 0)
{
return ((float)(im[index + 1] - im[index - 1]) / 2);
}
else if (col_index + 1 == width)
{
return ((float)(im[index] - im[index - 1]) / 2);
}
else if (col_index - 1 == -1)
{
return ((float)(im[index + 1] - im[index]) / 2);
}
}
__host__ float x_derivativeCPU(unsigned char* im, int col_index, int index, int width) {
if ((col_index + 1) < width && (col_index - 1) >= 0)
{
return ((float)(im[index + 1] - im[index - 1]) / 2);
}
else if (col_index + 1 >= width)
{
return ((float)(im[index] - im[index - 1]) / 2);
}
else
{
return ((float)(im[index + 1] - im[index]) / 2);
}
}
__device__ int difference_term(unsigned char pixel_i, unsigned char pixel_j) {
return min(abs((int)(pixel_i - pixel_j)), TH_color);
}
__device__ float difference_term_2(float pixel_i, float pixel_j) {
return min(abs(pixel_i - pixel_j), 1.0f*TH_grad);
}
__device__ int getGlobalIdx_1D_2D()
{
return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__host__ void compute_costVolumeOnCpu(unsigned char* i1, unsigned char* i2, float* cost, float* derivative1, float* derivative2, int w1, int w2, int h1, int h2, int size_d, int dmin) {
float alpha = 1.0f*ALPHA;
float th_color = 1.0f*TH_color;
float th_grad = 1.0f*TH_grad;
for (int k = 0; k < size_d; k++) {
for (int j = 0; j < h1; j++) {
for (int i = 0; i < w1; i++) {
int index = j * w1 + i;
int id = k * w1*h1 + index;
float c = (1.0f - alpha) * th_color + alpha * (1.0f*th_grad);
int d = dmin + k;
if ((i + d < w2) && (i + d >= 0)) {
float diff_term = 1.0f*abs(i1[index] - i2[index + d]);
float grad_1 = 1.0f*derivative1[index];
float grad_2 = 1.0f*derivative2[index + d];
float grad_term = 1.0f*abs(grad_1 - grad_2);
c = (1.0f - alpha)*min(diff_term, th_color) + alpha * min(grad_term, th_grad);
}
cost[id] = c;
}
}
}
}
__host__ void x_derivativeOnCpu(unsigned char* in, float* out, int w, int h) {
for (int j = 0; j < h; j++) {
for (int i = 0; i < w; i++) {
int id = j * w + i;
int c1 = 0;
int c2 = 0;
if (i - 1 >= 0 && i + 1 < w) {
c1 = (int)(in[id + 1]);
c2 = (int)(in[id - 1]);
}
else if (i + 1 >= w) {
c1 = (int)(in[id]);
c2 = (int)(in[id -1]);
}
else{
c1 = (int)(in[id+1]);
c2 = (int)(in[id]);
}
out[id] = 1.0f*(c2 - c1) / 2;
}
}
}
__global__ void x_derivativeOnGPU(unsigned char* in, float* out, int w, int h) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
int i = id % w;
if (id < w * h) {
float c1 = 0;
float c2 = 0;
if (i - 1 >= 0 && i + 1 < w) {
c1 = (int)(in[id + 1]);
c2 = (int)(in[id - 1]);
}
else if (i + 1 >= w) {
c1 = (int)(in[id]);
c2 = (int)(in[id - 1]);
}
else if (i -1 <= -1){
c1 = (int)(in[id + 1]);
c2 = (int)(in[id]);
}
out[id] = 1.0f*(c2 - c1) / 2;
}
}
/**
__global__ void x_derivativeOnGPU(unsigned char* in, unsigned char* out, const int w, const int h) {
int tdx = threadIdx.x;
int id = blockIdx.x*blockDim.y + threadIdx.y;
int idx = id % w;
int idy = id / w;
__shared__ float s_f[TILE_WIDTH][3];
for (int i = 0; i < TILE_WIDTH; i++) {
if ((idx - 1) >= 0 && idx + 1 < w) {
s_f[i][0] = in[idx - 1];
s_f[i][1] = in[idx];
s_f[i][2] = in[idx + 1];
}
else if (idx + 1 == w) {
s_f[i][0] = in[idx - 1];
s_f[i][1] = in[idx];
s_f[i][2] = in[idx];
}
else if (idx - 1 == 0) {
s_f[i][0] = in[idx];
s_f[i][1] = in[idx];
s_f[i][2] = in[idx + 1];
}
__syncthreads();
s_f[i][1] = (s_f[i][2] - s_f[i][0]) / 2;
out[id] = s_f[i][1];
}
}
**/ |
e5270a8ae16ed04abd4746038bc81979ed5ebbe1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "deform_conv_op.hpp"
namespace Shadow {
namespace Vision {
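// Bilinear interpolation of bottom_data at the fractional location (h, w); coordinates at or
// beyond the last row/column are clamped to the border sample.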
template <typename T>
__device__ T deform_im2col_bilinear(const T *bottom_data, int data_width,
int height, int width, T h, T w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (T)h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (T)w_low;
} else {
w_high = w_low + 1;
}
T lh = h - h_low;
T lw = w - w_low;
T hh = 1 - lh, hw = 1 - lw;
T v1 = bottom_data[h_low * data_width + w_low];
T v2 = bottom_data[h_low * data_width + w_high];
T v3 = bottom_data[h_high * data_width + w_low];
T v4 = bottom_data[h_high * data_width + w_high];
T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
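// Deformable im2col: each thread handles one (input channel, output position) pair, reads the
// learned (offset_h, offset_w) for every kernel tap from data_offset, samples the input
// bilinearly at the displaced location (zero_point is used outside the image), and writes the
// unrolled column buffer (im2col layout).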
template <typename T>
__global__ void deform_im2col_gpu_kernel(
int n, const T *data_im, const T *data_offset, int im_offset, int height,
int width, int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h,
int stride_w, int dilation_h, int dilation_w, int zero_point,
int channel_per_deform_group, int height_col, int width_col, T *data_col) {
CUDA_KERNEL_LOOP(globalid, n) {
int w_col = globalid % width_col;
int h_col = (globalid / width_col) % height_col;
int c_im = (globalid / width_col) / height_col;
int c_col = c_im * kernel_h * kernel_w;
int deform_group_index = c_im / channel_per_deform_group;
int h_in = h_col * stride_h - pad_h;
int w_in = w_col * stride_w - pad_w;
T *data_col_ptr =
data_col + (c_col * height_col + h_col) * width_col + w_col;
const T *data_im_ptr =
data_im + im_offset + (c_im * height + h_in) * width + w_in;
const T *data_offset_ptr = data_offset + deform_group_index * 2 * kernel_h *
kernel_w * height_col *
width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
T offset_h = data_offset_ptr[data_offset_h_ptr];
T offset_w = data_offset_ptr[data_offset_w_ptr];
T val = static_cast<T>(zero_point);
T h_im = h_in + i * dilation_h + offset_h;
T w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
T map_h = i * dilation_h + offset_h;
T map_w = j * dilation_w + offset_w;
int cur_height = height - h_in;
int cur_width = width - w_in;
val = deform_im2col_bilinear(data_im_ptr, width, cur_height,
cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
void DeformIm2Col(const T *in_data, const VecInt &in_shape,
const T *offset_data, int offset, int deform_group,
int kernel_size, int stride, int pad, int dilation,
int zero_point, const VecInt &out_shape, T *out_data,
Context *context) {
int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3];
int out_h = out_shape[2], out_w = out_shape[3];
int channel_per_deform_group = in_c / deform_group;
int count = in_c * out_h * out_w;
hipLaunchKernelGGL(( deform_im2col_gpu_kernel<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0,
hipStream_t(context->cuda_stream()),
count, in_data, offset_data, offset, in_h, in_w, kernel_size, kernel_size,
pad, pad, stride, stride, dilation, dilation, zero_point,
channel_per_deform_group, out_h, out_w, out_data);
CUDA_CHECK(hipPeekAtLastError());
}
template void DeformIm2Col(const float *, const VecInt &, const float *, int,
int, int, int, int, int, int, const VecInt &,
float *, Context *);
} // namespace Vision
} // namespace Shadow
| e5270a8ae16ed04abd4746038bc81979ed5ebbe1.cu | #include "deform_conv_op.hpp"
namespace Shadow {
namespace Vision {
template <typename T>
__device__ T deform_im2col_bilinear(const T *bottom_data, int data_width,
int height, int width, T h, T w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (T)h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (T)w_low;
} else {
w_high = w_low + 1;
}
T lh = h - h_low;
T lw = w - w_low;
T hh = 1 - lh, hw = 1 - lw;
T v1 = bottom_data[h_low * data_width + w_low];
T v2 = bottom_data[h_low * data_width + w_high];
T v3 = bottom_data[h_high * data_width + w_low];
T v4 = bottom_data[h_high * data_width + w_high];
T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void deform_im2col_gpu_kernel(
int n, const T *data_im, const T *data_offset, int im_offset, int height,
int width, int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h,
int stride_w, int dilation_h, int dilation_w, int zero_point,
int channel_per_deform_group, int height_col, int width_col, T *data_col) {
CUDA_KERNEL_LOOP(globalid, n) {
int w_col = globalid % width_col;
int h_col = (globalid / width_col) % height_col;
int c_im = (globalid / width_col) / height_col;
int c_col = c_im * kernel_h * kernel_w;
int deform_group_index = c_im / channel_per_deform_group;
int h_in = h_col * stride_h - pad_h;
int w_in = w_col * stride_w - pad_w;
T *data_col_ptr =
data_col + (c_col * height_col + h_col) * width_col + w_col;
const T *data_im_ptr =
data_im + im_offset + (c_im * height + h_in) * width + w_in;
const T *data_offset_ptr = data_offset + deform_group_index * 2 * kernel_h *
kernel_w * height_col *
width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
T offset_h = data_offset_ptr[data_offset_h_ptr];
T offset_w = data_offset_ptr[data_offset_w_ptr];
T val = static_cast<T>(zero_point);
T h_im = h_in + i * dilation_h + offset_h;
T w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
T map_h = i * dilation_h + offset_h;
T map_w = j * dilation_w + offset_w;
int cur_height = height - h_in;
int cur_width = width - w_in;
val = deform_im2col_bilinear(data_im_ptr, width, cur_height,
cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
void DeformIm2Col(const T *in_data, const VecInt &in_shape,
const T *offset_data, int offset, int deform_group,
int kernel_size, int stride, int pad, int dilation,
int zero_point, const VecInt &out_shape, T *out_data,
Context *context) {
int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3];
int out_h = out_shape[2], out_w = out_shape[3];
int channel_per_deform_group = in_c / deform_group;
int count = in_c * out_h * out_w;
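// One thread per (input channel, output pixel); each thread fills the kernel_size x kernel_size column entries for its location.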
deform_im2col_gpu_kernel<T><<<GetBlocks(count), NumThreads, 0,
cudaStream_t(context->cuda_stream())>>>(
count, in_data, offset_data, offset, in_h, in_w, kernel_size, kernel_size,
pad, pad, stride, stride, dilation, dilation, zero_point,
channel_per_deform_group, out_h, out_w, out_data);
CUDA_CHECK(cudaPeekAtLastError());
}
template void DeformIm2Col(const float *, const VecInt &, const float *, int,
int, int, int, int, int, int, const VecInt &,
float *, Context *);
} // namespace Vision
} // namespace Shadow
|
ce5a4fd394e43811d9ad5cace3a964f828381448.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* 2D Convolution: C = A (*) B, A is the 5x5 kernel matrix, B is the image matrix.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <2Dconvolution_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
Matrix A;
Matrix B;
Matrix C;
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
B = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
C = AllocateMatrix(B.height, B.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL;
unsigned int data_read = 0;
cutReadFilei(argv[1], ¶ms, &data_read, true);
if(data_read != 2)
{
printf("Error reading parameter file\n");
cutFree(params);
return 1;
}
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
B = AllocateMatrix(params[0], params[1], 0);
C = AllocateMatrix(params[0], params[1], 0);
cutFree(params);
(void)ReadFile(&A, argv[2]);
(void)ReadFile(&B, argv[3]);
}
// Convolution on the device
ConvolutionOnDevice(A, B, C);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(C.height, C.width, 0);
computeGold(reference.elements, A.elements, B.elements, B.height, B.width);
// in this case check if the result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, C.elements, C.width * C.height, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(C, argv[4]);
}
else if(argc == 2)
{
WriteFile(C, argv[1]);
}
// Free matrices
FreeMatrix(&A);
FreeMatrix(&B);
FreeMatrix(&C);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// __device__ __constant__ float* Ad;
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C)
{
//Load A and B to the device
//Matrix Ad = AllocateDeviceMatrix(A);
//CopyToDeviceMatrix(Ad, A);
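// Ad is presumably the __constant__ kernel buffer declared in the included 2Dconvolution_kernel.cu; it is not declared in this translation unit (the declaration above is commented out).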
hipMalloc((void**)&Ad, KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
hipMemcpyToSymbol(Ad, A.elements, KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
Matrix Bd = AllocateDeviceMatrix(B);
CopyToDeviceMatrix(Bd, B);
//Allocate C on the device
Matrix Cd = AllocateDeviceMatrix(C);
CopyToDeviceMatrix(Cd, C); // Clear memory
//Setup the execution configuration
//Launch the device computation threads!
int blocks = B.height;
int threads = B.width;
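// One block per image row, one thread per pixel in the row; this only works while B.width does not exceed the per-block thread limit (typically 1024).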
hipEvent_t start, stop;
float elapsedTime=0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ConvolutionKernel), dim3(blocks), dim3(threads), 0, 0, Bd,Cd);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("The execution time of GPU is :%f\n",elapsedTime);
//Read C from the device
CopyFromDeviceMatrix(C, Cd);
FreeDeviceMatrix(&Bd);
FreeDeviceMatrix(&Cd);
}
//Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
//Allocate a device matrix of dimensions height*width
//If init == 0, initialize to all zeroes.
//If init == 1, perform random initialization.
//If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
//don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
//Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height * M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f);
}
| ce5a4fd394e43811d9ad5cace3a964f828381448.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* 2D Convolution: C = A (*) B, A is the 5x5 kernel matrix, B is the image matrix.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <2Dconvolution_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
Matrix A;
Matrix B;
Matrix C;
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
B = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
C = AllocateMatrix(B.height, B.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL;
unsigned int data_read = 0;
cutReadFilei(argv[1], ¶ms, &data_read, true);
if(data_read != 2)
{
printf("Error reading parameter file\n");
cutFree(params);
return 1;
}
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
B = AllocateMatrix(params[0], params[1], 0);
C = AllocateMatrix(params[0], params[1], 0);
cutFree(params);
(void)ReadFile(&A, argv[2]);
(void)ReadFile(&B, argv[3]);
}
// Convolution on the device
ConvolutionOnDevice(A, B, C);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(C.height, C.width, 0);
computeGold(reference.elements, A.elements, B.elements, B.height, B.width);
// in this case check if the result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, C.elements, C.width * C.height, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(C, argv[4]);
}
else if(argc == 2)
{
WriteFile(C, argv[1]);
}
// Free matrices
FreeMatrix(&A);
FreeMatrix(&B);
FreeMatrix(&C);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// __device__ __constant__ float* Ad;
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C)
{
//Load A and B to the device
//Matrix Ad = AllocateDeviceMatrix(A);
//CopyToDeviceMatrix(Ad, A);
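// Ad is presumably the __constant__ kernel buffer declared in the included 2Dconvolution_kernel.cu; it is not declared in this translation unit (the declaration above is commented out).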
cudaMalloc((void**)&Ad, KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
cudaMemcpyToSymbol(Ad, A.elements, KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
Matrix Bd = AllocateDeviceMatrix(B);
CopyToDeviceMatrix(Bd, B);
//Allocate C on the device
Matrix Cd = AllocateDeviceMatrix(C);
CopyToDeviceMatrix(Cd, C); // Clear memory
//Setup the execution configuration
//Launch the device computation threads!
int blocks = B.height;
int threads = B.width;
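// One block per image row, one thread per pixel in the row; this only works while B.width does not exceed the per-block thread limit (typically 1024).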
cudaEvent_t start, stop;
float elapsedTime=0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
ConvolutionKernel<<<blocks, threads>>>(Bd,Cd);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("The execution time of GPU is :%f\n",elapsedTime);
//Read C from the device
CopyFromDeviceMatrix(C, Cd);
FreeDeviceMatrix(&Bd);
FreeDeviceMatrix(&Cd);
}
//Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
//Allocate a device matrix of dimensions height*width
//If init == 0, initialize to all zeroes.
//If init == 1, perform random initialization.
//If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
//don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
//Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height * M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f);
}
|
38dab6cc74ca498d1731ece73c9f7a0ab71a9383.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/utils/random.hpp>
namespace nbla {
// Generate rand(low, high) values from output of hiprandGenerateUniform.
// hiprandGenerateUniform returns random values in (0, 1], but we need [low,
// high).
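// With u in (0, 1], the mapping (1 - u) * (high - low) + low sends u = 1 to low and
// u -> 0 toward high, so the result lies in [low, high).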
template <typename T>
__global__ void kernel_rand_post_process(int size, T *dev_ptr, T low, T high) {
NBLA_CUDA_KERNEL_LOOP(idx, size) {
dev_ptr[idx] = (T(1) - dev_ptr[idx]) * (high - low) + low;
}
}
static __global__ void kernel_randint_post_process(int size, int *dev_ptr,
int low, int high) {
float *f_ptr = reinterpret_cast<float *>(dev_ptr);
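// The buffer was filled with floats by hiprandGenerateUniform; each thread reads
// the float at its own index and overwrites it in place with an integer in
// [low, high), so the in-place reinterpretation is safe.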
NBLA_CUDA_KERNEL_LOOP(idx, size) {
dev_ptr[idx] = (1.0f - f_ptr[idx]) * (high - low) + low;
}
}
template <>
void curand_generate_rand<float>(hiprandGenerator_t gen, float low, float high,
float *dev_ptr, size_t size) {
NBLA_CURAND_CHECK(hiprandGenerateUniform(gen, dev_ptr, size));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_rand_post_process<float>), size,
dev_ptr, low, high);
}
template <>
void curand_generate_rand<int>(hiprandGenerator_t gen, int low, int high,
int *dev_ptr, size_t size) {
NBLA_CURAND_CHECK(
hiprandGenerateUniform(gen, reinterpret_cast<float *>(dev_ptr), size));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_randint_post_process, size, dev_ptr,
low, high);
}
}
| 38dab6cc74ca498d1731ece73c9f7a0ab71a9383.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/utils/random.hpp>
namespace nbla {
// Generate rand(low, high) values from output of curandGenerateUniform.
// curandGenerateUniform returns random values in (0, 1], but we need [low,
// high).
template <typename T>
__global__ void kernel_rand_post_process(int size, T *dev_ptr, T low, T high) {
NBLA_CUDA_KERNEL_LOOP(idx, size) {
dev_ptr[idx] = (T(1) - dev_ptr[idx]) * (high - low) + low;
}
}
static __global__ void kernel_randint_post_process(int size, int *dev_ptr,
int low, int high) {
float *f_ptr = reinterpret_cast<float *>(dev_ptr);
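// The buffer was filled with floats by curandGenerateUniform; each thread reads
// the float at its own index and overwrites it in place with an integer in
// [low, high), so the in-place reinterpretation is safe.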
NBLA_CUDA_KERNEL_LOOP(idx, size) {
dev_ptr[idx] = (1.0f - f_ptr[idx]) * (high - low) + low;
}
}
template <>
void curand_generate_rand<float>(curandGenerator_t gen, float low, float high,
float *dev_ptr, size_t size) {
NBLA_CURAND_CHECK(curandGenerateUniform(gen, dev_ptr, size));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_rand_post_process<float>), size,
dev_ptr, low, high);
}
template <>
void curand_generate_rand<int>(curandGenerator_t gen, int low, int high,
int *dev_ptr, size_t size) {
NBLA_CURAND_CHECK(
curandGenerateUniform(gen, reinterpret_cast<float *>(dev_ptr), size));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_randint_post_process, size, dev_ptr,
low, high);
}
}
|
a1fa912a4bf920b84b57f03938c544963067314a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by yangyueji on 3/21/17.
//
#include "GPUMemManager.h"
using namespace std;
GPUMemManager::~GPUMemManager() {
hipFree(this->dev_nodeReverseList_raw);
hipFree(this->dev_reverseEdgeList_raw);
hipFree(this->dev_nodeList_raw);
hipFree(this->dev_edgeList_raw);
hipFree(this->dev_edgeprob_raw);
hipFree(this->all_states);
}
void GPUMemManager::initDeviceMem(GraphStruct &graphStruct) {
this->nodeSize = graphStruct.nodesSize;
hipMalloc((void**)&this->dev_nodeReverseList_raw, this->nodeSize * sizeof(uint32_t));
hipMalloc((void**)&this->dev_nodeList_raw, this->nodeSize * sizeof(uint32_t));
//edgeList
this->edgeSize = graphStruct.edgesListSize;
hipMalloc((void**)&this->dev_reverseEdgeList_raw, this->edgeSize * sizeof(uint64_t));
hipMemcpy(this->dev_reverseEdgeList_raw, graphStruct.reverseEdgesList, sizeof(uint64_t) * this->edgeSize, hipMemcpyHostToDevice);
hipMalloc((void**)&this->dev_edgeList_raw, this->edgeSize * sizeof(uint64_t));
hipMemcpy(this->dev_edgeList_raw, graphStruct.edgesList, sizeof(uint64_t) * this->edgeSize, hipMemcpyHostToDevice);
hipMalloc((void**)&this->dev_edgeprob_raw, this->edgeSize * sizeof(uint16_t));
hipMemcpy(this->dev_edgeprob_raw, graphStruct.edgesProb, sizeof(uint16_t) * this->edgeSize, hipMemcpyHostToDevice);
}
void GPUMemManager::sortEdgesOnDev(processingType ptype) {
unsigned long long startTime_sortEdgeList = getTime();
switch(ptype) {
case normalEdgeProcessing: {
thrust::device_ptr<uint64_t> dev_edgeList_ptr = thrust::device_pointer_cast(this->dev_edgeList_raw);
thrust::sort(dev_edgeList_ptr, dev_edgeList_ptr + this->edgeSize);
break;
}
case reverseEdgeProcessing: {
thrust::device_ptr<uint64_t> dev_reverseEdgeList_ptr = thrust::device_pointer_cast(
this->dev_reverseEdgeList_raw);
thrust::device_ptr<uint16_t> dev_edgeprob_ptr = thrust::device_pointer_cast(this->dev_edgeprob_raw);
thrust::sort_by_key(dev_reverseEdgeList_ptr, dev_reverseEdgeList_ptr + this->edgeSize, dev_edgeprob_ptr);
break;
}
default:
std::cout << "no such type!" << std::endl;
exit(1);
}
unsigned long long endTime_sortEdgeList = getTime();
cout << "******Edge List sorting time = " << getInterval(startTime_sortEdgeList, endTime_sortEdgeList) << "ms." << endl;
}
void GPUMemManager::init_randomStates(uint32_t maxGrid, uint32_t maxBlock, uint64_t seed) {
hipMalloc((void**)&this->all_states, sizeof(hiprandState_t) * maxBlock * maxGrid);
hipLaunchKernelGGL(( GPUKernels::setup_random_state), dim3(dim3(maxGrid)), dim3(dim3(maxBlock)), 0, 0, this->all_states, seed);
}
void GPUMemManager::setNodeListOnDev(uint32_t grid_dim, uint32_t block_dim, processingType ptype) {
dim3 _grid_dim(grid_dim);
dim3 _block_dim(block_dim);
uint32_t maxThreadNum = _grid_dim.x * _block_dim.x;
uint32_t maxJobsPerThread = this->edgeSize / maxThreadNum;
uint32_t num_threads_one_more_job = this->edgeSize % maxThreadNum;
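// Static work split over the sorted edge list: each thread scans maxJobsPerThread edges, and the first num_threads_one_more_job threads presumably take one extra edge inside the kernel so all edgeSize edges are covered.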
unsigned long long startTime_appendAdjList = getTime();
switch (ptype) {
case normalEdgeProcessing: {
hipLaunchKernelGGL(( GPUKernels::setNodeListOnDev) , dim3(_grid_dim), dim3(_block_dim) , 0, 0, this->nodeSize,
this->edgeSize,
this->dev_nodeList_raw,
this->dev_edgeList_raw,
maxJobsPerThread,
num_threads_one_more_job);
hipDeviceSynchronize();
break;
}
case reverseEdgeProcessing: {
hipLaunchKernelGGL(( GPUKernels::setNodeListOnDev) , dim3(_grid_dim), dim3(_block_dim) , 0, 0, this->nodeSize,
this->edgeSize,
this->dev_nodeReverseList_raw,
this->dev_reverseEdgeList_raw,
maxJobsPerThread,
num_threads_one_more_job);
hipDeviceSynchronize();
break;
}
default:
std::cout << "no such type!" << std::endl;
exit(1);
}
unsigned long long endTime_appendAdjList = getTime();
cout << "******Appending adjacent lists to node lists time = " << getInterval(startTime_appendAdjList, endTime_appendAdjList) << "ms." << endl;
} | a1fa912a4bf920b84b57f03938c544963067314a.cu | //
// Created by yangyueji on 3/21/17.
//
#include "GPUMemManager.h"
using namespace std;
GPUMemManager::~GPUMemManager() {
cudaFree(this->dev_nodeReverseList_raw);
cudaFree(this->dev_reverseEdgeList_raw);
cudaFree(this->dev_nodeList_raw);
cudaFree(this->dev_edgeList_raw);
cudaFree(this->dev_edgeprob_raw);
cudaFree(this->all_states);
}
void GPUMemManager::initDeviceMem(GraphStruct &graphStruct) {
this->nodeSize = graphStruct.nodesSize;
cudaMalloc((void**)&this->dev_nodeReverseList_raw, this->nodeSize * sizeof(uint32_t));
cudaMalloc((void**)&this->dev_nodeList_raw, this->nodeSize * sizeof(uint32_t));
//edgeList
this->edgeSize = graphStruct.edgesListSize;
cudaMalloc((void**)&this->dev_reverseEdgeList_raw, this->edgeSize * sizeof(uint64_t));
cudaMemcpy(this->dev_reverseEdgeList_raw, graphStruct.reverseEdgesList, sizeof(uint64_t) * this->edgeSize, cudaMemcpyHostToDevice);
cudaMalloc((void**)&this->dev_edgeList_raw, this->edgeSize * sizeof(uint64_t));
cudaMemcpy(this->dev_edgeList_raw, graphStruct.edgesList, sizeof(uint64_t) * this->edgeSize, cudaMemcpyHostToDevice);
cudaMalloc((void**)&this->dev_edgeprob_raw, this->edgeSize * sizeof(uint16_t));
cudaMemcpy(this->dev_edgeprob_raw, graphStruct.edgesProb, sizeof(uint16_t) * this->edgeSize, cudaMemcpyHostToDevice);
}
void GPUMemManager::sortEdgesOnDev(processingType ptype) {
unsigned long long startTime_sortEdgeList = getTime();
switch(ptype) {
case normalEdgeProcessing: {
thrust::device_ptr<uint64_t> dev_edgeList_ptr = thrust::device_pointer_cast(this->dev_edgeList_raw);
thrust::sort(dev_edgeList_ptr, dev_edgeList_ptr + this->edgeSize);
break;
}
case reverseEdgeProcessing: {
thrust::device_ptr<uint64_t> dev_reverseEdgeList_ptr = thrust::device_pointer_cast(
this->dev_reverseEdgeList_raw);
thrust::device_ptr<uint16_t> dev_edgeprob_ptr = thrust::device_pointer_cast(this->dev_edgeprob_raw);
thrust::sort_by_key(dev_reverseEdgeList_ptr, dev_reverseEdgeList_ptr + this->edgeSize, dev_edgeprob_ptr);
break;
}
default:
std::cout << "no such type!" << std::endl;
exit(1);
}
unsigned long long endTime_sortEdgeList = getTime();
cout << "******Edge List sorting time = " << getInterval(startTime_sortEdgeList, endTime_sortEdgeList) << "ms." << endl;
}
void GPUMemManager::init_randomStates(uint32_t maxGrid, uint32_t maxBlock, uint64_t seed) {
cudaMalloc((void**)&this->all_states, sizeof(curandState) * maxBlock * maxGrid);
GPUKernels::setup_random_state<<<dim3(maxGrid), dim3(maxBlock)>>>(this->all_states, seed);
}
void GPUMemManager::setNodeListOnDev(uint32_t grid_dim, uint32_t block_dim, processingType ptype) {
dim3 _grid_dim(grid_dim);
dim3 _block_dim(block_dim);
uint32_t maxThreadNum = _grid_dim.x * _block_dim.x;
uint32_t maxJobsPerThread = this->edgeSize / maxThreadNum;
uint32_t num_threads_one_more_job = this->edgeSize % maxThreadNum;
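// Static work split over the sorted edge list: each thread scans maxJobsPerThread edges, and the first num_threads_one_more_job threads presumably take one extra edge inside the kernel so all edgeSize edges are covered.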
unsigned long long startTime_appendAdjList = getTime();
switch (ptype) {
case normalEdgeProcessing: {
GPUKernels::setNodeListOnDev <<< _grid_dim, _block_dim >>> (this->nodeSize,
this->edgeSize,
this->dev_nodeList_raw,
this->dev_edgeList_raw,
maxJobsPerThread,
num_threads_one_more_job);
cudaDeviceSynchronize();
break;
}
case reverseEdgeProcessing: {
GPUKernels::setNodeListOnDev <<< _grid_dim, _block_dim >>> (this->nodeSize,
this->edgeSize,
this->dev_nodeReverseList_raw,
this->dev_reverseEdgeList_raw,
maxJobsPerThread,
num_threads_one_more_job);
cudaDeviceSynchronize();
break;
}
default:
std::cout << "no such type!" << std::endl;
exit(1);
}
unsigned long long endTime_appendAdjList = getTime();
cout << "******Appending adjacent lists to node lists time = " << getInterval(startTime_appendAdjList, endTime_appendAdjList) << "ms." << endl;
} |
a2bb441d1f8c8bb11249d2ac76e4173e88eecacd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file calculateForce.cu
* \author Christopher Minar ([email protected])
* \brief Kernels to compute pressure and viscous forces at the immersed-boundary points.
*/
#include "calculateForce.h"
namespace kernels
{
__global__//kernel should be of size totalPoints
void force_pressure(double *force_pressure, double *body_intercept_p,
double *body_intercept_p_x, double *body_intercept_p_y,
double *bx, double *by, double *xv, double *yu, int *ghostTagsP,
int *i_start, int *j_start, int width, int height, int totalPoints, int nx, int ny, double midX, double midY)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
int ii = i_start[0],
jj = j_start[0],
ip;
int I0,
If,
J0,
Jf,
count = 0,
thetaID,
minID,
maxID;
//find 16 surrounding nodes
double theta,
thetaNode,
min=-10,
max=10;
while (xv[ii] < bx[idx])
ii++;
I0 = ii-2;
If = ii+1;
while (yu[jj] < by[idx])
jj++;
J0=jj-2;
Jf=jj+1;
thetaNode = asin((by[idx]-midY)/sqrt(pow(bx[idx]-midX,2)+pow(by[idx]-midY,2)));
if (bx[idx] < midX)//this janky if statement forces theta to be continuous
{
thetaNode = M_PI-thetaNode;
}
if (thetaNode > M_PI*5/4 || thetaNode < -M_PI/4)
{
if(bx[idx]>midX)
thetaNode += 2*M_PI;
}
//sweep over nodes calculating theta
//find theta above and below node
for (int i = I0; i<=If; i++)
{
for(int j=J0;j<=Jf;j++)
{
ip = j*nx+i;
if (ghostTagsP[ip]>0)
{
theta = asin((body_intercept_p_y[ip]-midY)/sqrt(pow(body_intercept_p_x[ip]-midX,2)+pow(body_intercept_p_y[ip]-midY,2)));
if (body_intercept_p_x[ip]<midX)
{
theta = M_PI-theta;
}
if (thetaNode > M_PI*5/4 || thetaNode < -M_PI/4)
{
if(body_intercept_p_x[ip]>midX)
theta += 2*M_PI;
}
thetaID = ip;
if (theta > thetaNode && theta < max)
{
max = theta;
maxID = thetaID;
}
if (theta<thetaNode && theta > min)
{
min = theta;
minID = thetaID;
}
}
count ++;
}
}
//interp for node
force_pressure[idx] = body_intercept_p[minID] + (body_intercept_p[maxID] - body_intercept_p[minID]) * (thetaNode-min) / (max-min);
}
__global__
void force_velocity_x(double *force_dudx, double *uB, double *u,
double *bx, double *by, double *xu, double *yu,
int *i_start, int *j_start, int width, int height, int totalPoints, int nx, int ny, double midX, double midY, double dx)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
int ii = i_start[0],
jj = j_start[0];
double y3, q3, q4,
x1, x2, y1, q1, q2;
//find extended image point
double rise = by[idx]-midY,
run = bx[idx]-midX,
radius = sqrt(rise*rise+run*run);
double dn = dx*sqrt(2.0),//distance from body to calc normal at, needs to be at least sqrt(2)*dx to place it a full node away from the body
ipx = bx[idx] + dn/radius*run,
ipy = by[idx] + dn/radius*rise;
//find points bounding extended image point
while (xu[ii] < ipx)
ii++;
x1 = xu[ii-1]; x2 = xu[ii];
while (yu[jj] < ipy)
jj++;
y3 = yu[jj];
y1 = yu[jj-1];
q3 = u[(jj)*(nx-1)+(ii-1)]; q4 = u[(jj)*(nx-1)+(ii)];
q1 = u[(jj-1)*(nx-1)+(ii-1)]; q2 = u[(jj-1)*(nx-1)+(ii)];
//interp for u at extended image point //flag grid must be uniform
//http://www.ajdesigner.com/phpinterpolation/bilinear_interpolation_equation.php
double topleft = (x2-ipx)*(y1-ipy)/(x2-x1)/(y1-y3)*q3,
topright = (ipx-x1)*(y1-ipy)/(x2-x1)/(y1-y3)*q4,
botleft = (x2-ipx)*(ipy-y3)/(x2-x1)/(y1-y3)*q1,
botright = (ipx-x1)*(ipy-y3)/(x2-x1)/(y1-y3)*q2,
ipu = botleft + botright + topleft + topright;
//calc normal derivative
force_dudx[idx] = (ipu-uB[0])/dn;
}
__global__
void force_velocity_y(double *force_dvdx, double *vB, double *u,
double *bx, double *by, double *xv, double *yv,
int *i_start, int *j_start, int width, int height, int totalPoints, int nx, int ny, double midX, double midY, double dx)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
int ii = i_start[0],
jj = j_start[0];
double y3, q3, q4,
x1, x2, y1, q1, q2;
//find extended image point
double rise = by[idx]-midY,
run = bx[idx]-midX,
radius = sqrt(rise*rise+run*run);
double dn = dx*sqrt(2.0),//distance from body to calc normal at, needs to be at least sqrt(2)*dx to place it a full node away from the body
ipx = bx[idx] + dn/radius*run,
ipy = by[idx] + dn/radius*rise;
//find points bounding extended image point
while (xv[ii] < ipx)
ii++;
x1 = xv[ii-1]; x2 = xv[ii];
while (yv[jj] < ipy)
jj++;
y3 = yv[jj];
y1 = yv[jj-1];
q3 = u[(jj)*(nx-1)+(ii-1) + ny*(nx-1)]; q4 = u[(jj)*(nx-1)+(ii) + ny*(nx-1)];
q1 = u[(jj-1)*(nx-1)+(ii-1) + ny*(nx-1)]; q2 = u[(jj-1)*(nx-1)+(ii) + ny*(nx-1)];
//interp for u at extended image point //flag grid must be uniform
//http://www.ajdesigner.com/phpinterpolation/bilinear_interpolation_equation.php
double topleft = (x2-ipx)*(y1-ipy)/(x2-x1)/(y1-y3)*q3,
topright = (ipx-x1)*(y1-ipy)/(x2-x1)/(y1-y3)*q4,
botleft = (x2-ipx)*(ipy-y3)/(x2-x1)/(y1-y3)*q1,
botright = (ipx-x1)*(ipy-y3)/(x2-x1)/(y1-y3)*q2,
ipu = botleft + botright + topleft + topright;
//calc normal derivative
force_dvdx[idx] = (ipu-vB[0])/dn;
}
__global__
void force(double *force_x, double *force_y, double *pressure, double *dudn, double *dvdn,
double *bx, double *by,
int totalPoints, double midX, double midY, double nu)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
//get area
double area = sqrt(pow(bx[0]-bx[1],2) + pow(by[0]-by[1],2));
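// The spacing between the first two boundary points is used as the panel length for every point, i.e. the boundary points are assumed to be uniformly spaced.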
//get normal vector
double h = sqrt(pow(by[idx]-midY,2) + pow(bx[idx]-midX,2)),
n1 = (bx[idx]-midX)/h,
n2 = (by[idx]-midY)/h;
//calc tang stress
double mu = nu,
tau_x = mu*((1-n1*n1)*dudn[idx]+(-n1*n2)*dvdn[idx]),
tau_y = mu*((-n1*n2)*dudn[idx]+(1-n2*n2)*dvdn[idx]);
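// tau = mu * (I - n n^T) * (du/dn, dv/dn): the normal velocity gradient projected onto the tangential direction, giving the viscous shear traction.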
//integrate
force_x[idx] = area * tau_x - area * n1 * pressure[idx];
force_y[idx] = area * tau_y - area * n2 * pressure[idx];
}
}
| a2bb441d1f8c8bb11249d2ac76e4173e88eecacd.cu | /***************************************************************************//**
* \file calculateForce.cu
* \author Christopher Minar ([email protected])
* \brief Kernels to compute pressure and viscous forces at the immersed-boundary points.
*/
#include "calculateForce.h"
namespace kernels
{
__global__//kernel should be of size totalPoints
void force_pressure(double *force_pressure, double *body_intercept_p,
double *body_intercept_p_x, double *body_intercept_p_y,
double *bx, double *by, double *xv, double *yu, int *ghostTagsP,
int *i_start, int *j_start, int width, int height, int totalPoints, int nx, int ny, double midX, double midY)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
int ii = i_start[0],
jj = j_start[0],
ip;
int I0,
If,
J0,
Jf,
count = 0,
thetaID,
minID,
maxID;
//find 16 surrounding nodes
double theta,
thetaNode,
min=-10,
max=10;
while (xv[ii] < bx[idx])
ii++;
I0 = ii-2;
If = ii+1;
while (yu[jj] < by[idx])
jj++;
J0=jj-2;
Jf=jj+1;
thetaNode = asin((by[idx]-midY)/sqrt(pow(bx[idx]-midX,2)+pow(by[idx]-midY,2)));
if (bx[idx] < midX)//this janky if statement forces theta to be continuous
{
thetaNode = M_PI-thetaNode;
}
if (thetaNode > M_PI*5/4 || thetaNode < -M_PI/4)
{
if(bx[idx]>midX)
thetaNode += 2*M_PI;
}
//sweep over nodes calculating theta
//find theta above and below node
for (int i = I0; i<=If; i++)
{
for(int j=J0;j<=Jf;j++)
{
ip = j*nx+i;
if (ghostTagsP[ip]>0)
{
theta = asin((body_intercept_p_y[ip]-midY)/sqrt(pow(body_intercept_p_x[ip]-midX,2)+pow(body_intercept_p_y[ip]-midY,2)));
if (body_intercept_p_x[ip]<midX)
{
theta = M_PI-theta;
}
if (thetaNode > M_PI*5/4 || thetaNode < -M_PI/4)
{
if(body_intercept_p_x[ip]>midX)
theta += 2*M_PI;
}
thetaID = ip;
if (theta > thetaNode && theta < max)
{
max = theta;
maxID = thetaID;
}
if (theta<thetaNode && theta > min)
{
min = theta;
minID = thetaID;
}
}
count ++;
}
}
//interp for node
force_pressure[idx] = body_intercept_p[minID] + (body_intercept_p[maxID] - body_intercept_p[minID]) * (thetaNode-min) / (max-min);
}
__global__
void force_velocity_x(double *force_dudx, double *uB, double *u,
double *bx, double *by, double *xu, double *yu,
int *i_start, int *j_start, int width, int height, int totalPoints, int nx, int ny, double midX, double midY, double dx)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
int ii = i_start[0],
jj = j_start[0];
double y3, q3, q4,
x1, x2, y1, q1, q2;
//find extended image point
double rise = by[idx]-midY,
run = bx[idx]-midX,
radius = sqrt(rise*rise+run*run);
double dn = dx*sqrt(2.0),//distance from body to calc normal at, needs to be at least sqrt(2)*dx to place it a full node away from the body
ipx = bx[idx] + dn/radius*run,
ipy = by[idx] + dn/radius*rise;
//find points bounding extended image point
while (xu[ii] < ipx)
ii++;
x1 = xu[ii-1]; x2 = xu[ii];
while (yu[jj] < ipy)
jj++;
y3 = yu[jj];
y1 = yu[jj-1];
q3 = u[(jj)*(nx-1)+(ii-1)]; q4 = u[(jj)*(nx-1)+(ii)];
q1 = u[(jj-1)*(nx-1)+(ii-1)]; q2 = u[(jj-1)*(nx-1)+(ii)];
//interp for u at extended image point //flag grid must be uniform
//http://www.ajdesigner.com/phpinterpolation/bilinear_interpolation_equation.php
double topleft = (x2-ipx)*(y1-ipy)/(x2-x1)/(y1-y3)*q3,
topright = (ipx-x1)*(y1-ipy)/(x2-x1)/(y1-y3)*q4,
botleft = (x2-ipx)*(ipy-y3)/(x2-x1)/(y1-y3)*q1,
botright = (ipx-x1)*(ipy-y3)/(x2-x1)/(y1-y3)*q2,
ipu = botleft + botright + topleft + topright;
//calc normal derivative
force_dudx[idx] = (ipu-uB[0])/dn;
}
__global__
void force_velocity_y(double *force_dvdx, double *vB, double *u,
double *bx, double *by, double *xv, double *yv,
int *i_start, int *j_start, int width, int height, int totalPoints, int nx, int ny, double midX, double midY, double dx)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
int ii = i_start[0],
jj = j_start[0];
double y3, q3, q4,
x1, x2, y1, q1, q2;
//find extended image point
double rise = by[idx]-midY,
run = bx[idx]-midX,
radius = sqrt(rise*rise+run*run);
double dn = dx*sqrt(2.0),//distance from body to calc normal at, needs to be at least sqrt(2)*dx to place it a full node away from the body
ipx = bx[idx] + dn/radius*run,
ipy = by[idx] + dn/radius*rise;
//find points bounding extended image point
while (xv[ii] < ipx)
ii++;
x1 = xv[ii-1]; x2 = xv[ii];
while (yv[jj] < ipy)
jj++;
y3 = yv[jj];
y1 = yv[jj-1];
q3 = u[(jj)*(nx-1)+(ii-1) + ny*(nx-1)]; q4 = u[(jj)*(nx-1)+(ii) + ny*(nx-1)];
q1 = u[(jj-1)*(nx-1)+(ii-1) + ny*(nx-1)]; q2 = u[(jj-1)*(nx-1)+(ii) + ny*(nx-1)];
//interp for u at extended image point //flag grid must be uniform
//http://www.ajdesigner.com/phpinterpolation/bilinear_interpolation_equation.php
double topleft = (x2-ipx)*(y1-ipy)/(x2-x1)/(y1-y3)*q3,
topright = (ipx-x1)*(y1-ipy)/(x2-x1)/(y1-y3)*q4,
botleft = (x2-ipx)*(ipy-y3)/(x2-x1)/(y1-y3)*q1,
botright = (ipx-x1)*(ipy-y3)/(x2-x1)/(y1-y3)*q2,
ipu = botleft + botright + topleft + topright;
//calc normal derivative
force_dvdx[idx] = (ipu-vB[0])/dn;
}
__global__
void force(double *force_x, double *force_y, double *pressure, double *dudn, double *dvdn,
double *bx, double *by,
int totalPoints, double midX, double midY, double nu)
{
//initialise
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= totalPoints)
return;
//get area
double area = sqrt(pow(bx[0]-bx[1],2) + pow(by[0]-by[1],2));
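// The spacing between the first two boundary points is used as the panel length for every point, i.e. the boundary points are assumed to be uniformly spaced.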
//get normal vector
double h = sqrt(pow(by[idx]-midY,2) + pow(bx[idx]-midX,2)),
n1 = (bx[idx]-midX)/h,
n2 = (by[idx]-midY)/h;
//calc tang stress
double mu = nu,
tau_x = mu*((1-n1*n1)*dudn[idx]+(-n1*n2)*dvdn[idx]),
tau_y = mu*((-n1*n2)*dudn[idx]+(1-n2*n2)*dvdn[idx]);
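// tau = mu * (I - n n^T) * (du/dn, dv/dn): the normal velocity gradient projected onto the tangential direction, giving the viscous shear traction.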
//integrate
force_x[idx] = area * tau_x - area * n1 * pressure[idx];
force_y[idx] = area * tau_y - area * n2 * pressure[idx];
}
}
|
54d99a02eca208ed84a78e7d8981dd78badd5414.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <torch/library.h>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
int const threadsPerBlock = sizeof(unsigned long long) * 8;
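// 64 = number of bits in an unsigned long long: boxes are tiled in groups of 64, and each 64-bit mask word records which boxes of one column tile a given box suppresses.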
template <typename T>
__device__ inline bool devIoU(
T const* const a,
T const* const b,
const float threshold) {
T left = max(a[0], b[0]), right = min(a[2], b[2]);
T top = max(a[1], b[1]), bottom = min(a[3], b[3]);
T width = max(right - left, (T)0), height = max(bottom - top, (T)0);
T interS = width * height;
T Sa = (a[2] - a[0]) * (a[3] - a[1]);
T Sb = (b[2] - b[0]) * (b[3] - b[1]);
return (interS / (Sa + Sb - interS)) > threshold;
}
template <typename T>
__global__ void nms_kernel_impl(
int n_boxes,
double iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
if (row_start > col_start)
return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ T block_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU<T>(cur_box, block_boxes + i * 4, iou_threshold)) {
t |= 1ULL << i;
}
}
const int col_blocks = ceil_div(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
at::Tensor nms_kernel(
const at::Tensor& dets,
const at::Tensor& scores,
double iou_threshold) {
TORCH_CHECK(dets.is_cuda(), "dets must be a CUDA tensor");
TORCH_CHECK(scores.is_cuda(), "scores must be a CUDA tensor");
TORCH_CHECK(
dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D");
TORCH_CHECK(
dets.size(1) == 4,
"boxes should have 4 elements in dimension 1, got ",
dets.size(1));
TORCH_CHECK(
scores.dim() == 1,
"scores should be a 1d tensor, got ",
scores.dim(),
"D");
TORCH_CHECK(
dets.size(0) == scores.size(0),
"boxes and scores should have same number of elements in ",
"dimension 0, got ",
dets.size(0),
" and ",
scores.size(0))
#if defined(WITH_CUDA) || defined(WITH_HIP)
at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device());
#else
TORCH_CHECK(false, "Not compiled with GPU support");
#endif
if (dets.numel() == 0) {
return at::empty({0}, dets.options().dtype(at::kLong));
}
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t).contiguous();
int dets_num = dets.size(0);
const int col_blocks = ceil_div(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.scalar_type(), "nms_kernel", [&] {
hipLaunchKernelGGL(( nms_kernel_impl<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host =
(unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
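// Greedy pass in descending score order: keep a box unless an earlier kept box already set its suppression bit, then OR its mask row into remv to suppress the boxes it overlaps.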
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(hipGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl("nms", nms_kernel);
}
} // namespace ops
} // namespace vision
| 54d99a02eca208ed84a78e7d8981dd78badd5414.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/library.h>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
int const threadsPerBlock = sizeof(unsigned long long) * 8;
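// 64 = number of bits in an unsigned long long: boxes are tiled in groups of 64, and each 64-bit mask word records which boxes of one column tile a given box suppresses.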
template <typename T>
__device__ inline bool devIoU(
T const* const a,
T const* const b,
const float threshold) {
T left = max(a[0], b[0]), right = min(a[2], b[2]);
T top = max(a[1], b[1]), bottom = min(a[3], b[3]);
T width = max(right - left, (T)0), height = max(bottom - top, (T)0);
T interS = width * height;
T Sa = (a[2] - a[0]) * (a[3] - a[1]);
T Sb = (b[2] - b[0]) * (b[3] - b[1]);
return (interS / (Sa + Sb - interS)) > threshold;
}
template <typename T>
__global__ void nms_kernel_impl(
int n_boxes,
double iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
if (row_start > col_start)
return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ T block_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU<T>(cur_box, block_boxes + i * 4, iou_threshold)) {
t |= 1ULL << i;
}
}
const int col_blocks = ceil_div(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
at::Tensor nms_kernel(
const at::Tensor& dets,
const at::Tensor& scores,
double iou_threshold) {
TORCH_CHECK(dets.is_cuda(), "dets must be a CUDA tensor");
TORCH_CHECK(scores.is_cuda(), "scores must be a CUDA tensor");
TORCH_CHECK(
dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D");
TORCH_CHECK(
dets.size(1) == 4,
"boxes should have 4 elements in dimension 1, got ",
dets.size(1));
TORCH_CHECK(
scores.dim() == 1,
"scores should be a 1d tensor, got ",
scores.dim(),
"D");
TORCH_CHECK(
dets.size(0) == scores.size(0),
"boxes and scores should have same number of elements in ",
"dimension 0, got ",
dets.size(0),
" and ",
scores.size(0))
#if defined(WITH_CUDA) || defined(WITH_HIP)
at::cuda::CUDAGuard device_guard(dets.device());
#else
TORCH_CHECK(false, "Not compiled with GPU support");
#endif
if (dets.numel() == 0) {
return at::empty({0}, dets.options().dtype(at::kLong));
}
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t).contiguous();
int dets_num = dets.size(0);
const int col_blocks = ceil_div(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.scalar_type(), "nms_kernel", [&] {
nms_kernel_impl<scalar_t><<<blocks, threads, 0, stream>>>(
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host =
(unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
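// Greedy pass in descending score order: keep a box unless an earlier kept box already set its suppression bit, then OR its mask row into remv to suppress the boxes it overlaps.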
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(cudaGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl("nms", nms_kernel);
}
} // namespace ops
} // namespace vision
|
9f0280470156e10a62a6fa62a3e8e90b1ad3372a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../operators/reduce_operators.cuh"
#include <cstdio>
using namespace akg_reduce;
using namespace std;
// check whether the op-atomic transformation is correct.
// compile code: nvcc test_atomic.cu -arch=sm_70
template <typename T>
__global__ void AtomicTestSum(T *dest, T val) {
SumOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
__global__ void AtomicTestMax(T *dest, T val) {
MaxOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
__global__ void AtomicTestMin(T *dest, T val) {
MinOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
void TestAtomicSum() {
cout << "TestAtomicSum" << endl;
int items = 1000;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
if (sizeof(T) == 2) {
h_a[i] = __float2half(0.0);
} else {
h_a[i] = 0.0;
}
}
GetGpuErr(hipMalloc((void **)&d_a, bytes));
GetGpuErr(hipMemcpy((void *)d_a, (void *)h_a, bytes, hipMemcpyHostToDevice));
dim3 grid(1000);
dim3 block(1000);
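// Each of the 1000 blocks adds 1.0 to element threadIdx.x, so every element should equal the grid size (1000) if the atomic add is correct.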
hipLaunchKernelGGL(( AtomicTestSum<T>), dim3(grid), dim3(block), 0, 0, d_a, 1.0);
GetGpuErr(hipPeekAtLastError());
GetGpuErr(hipMemcpy((void *)h_a, (void *)d_a, bytes, hipMemcpyDeviceToHost));
for (auto i = 0; i < 10; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%f ", tmp);
}
printf("\n");
GetGpuErr(hipFree(d_a));
free(h_a);
}
template <typename T>
void TestAtomicMax() {
cout << "TestAtomicMax" << endl;
int items = 10;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
if (sizeof(T) == 2) {
h_a[i] = __float2half(i);
} else {
h_a[i] = i;
}
}
GetGpuErr(hipMalloc((void **)&d_a, bytes));
GetGpuErr(hipMemcpy((void *)d_a, (void *)h_a, bytes, hipMemcpyHostToDevice));
double val = 1.234567891012345;
dim3 grid(10000);
dim3 block(items);
hipLaunchKernelGGL(( AtomicTestMax<T>), dim3(grid), dim3(block), 0, 0, d_a, val);
GetGpuErr(hipPeekAtLastError());
GetGpuErr(hipMemcpy((void *)h_a, (void *)d_a, bytes, hipMemcpyDeviceToHost));
for (auto i = 0; i < 5; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%.12f ", tmp);
}
printf("\n");
GetGpuErr(hipFree(d_a));
free(h_a);
}
template <typename T>
void TestAtomicMin() {
cout << "TestAtomicMin" << endl;
int items = 10;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
h_a[i] = __float2half(i);
}
GetGpuErr(hipMalloc((void **)&d_a, bytes));
GetGpuErr(hipMemcpy((void *)d_a, (void *)h_a, bytes, hipMemcpyHostToDevice));
double val = 1.234567891012345;
dim3 grid(10000);
dim3 block(items);
hipLaunchKernelGGL(( AtomicTestMin<T>), dim3(grid), dim3(block), 0, 0, d_a, val);
GetGpuErr(hipPeekAtLastError());
GetGpuErr(hipMemcpy((void *)h_a, (void *)d_a, bytes, hipMemcpyDeviceToHost));
for (auto i = 0; i < 5; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%.12f ", tmp);
}
printf("\n");
GetGpuErr(hipFree(d_a));
free(h_a);
}
int main() {
TestAtomicSum<float>();
TestAtomicSum<double>();
TestAtomicSum<half>();
TestAtomicMax<float>();
TestAtomicMax<double>();
TestAtomicMax<half>();
TestAtomicMin<float>();
TestAtomicMin<double>();
TestAtomicMin<half>();
return 0;
}
| 9f0280470156e10a62a6fa62a3e8e90b1ad3372a.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../operators/reduce_operators.cuh"
#include <cstdio>
using namespace akg_reduce;
using namespace std;
// check whether the op-atomic transformation is correct.
// compile code: nvcc test_atomic.cu -arch=sm_70
template <typename T>
__global__ void AtomicTestSum(T *dest, T val) {
SumOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
__global__ void AtomicTestMax(T *dest, T val) {
MaxOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
__global__ void AtomicTestMin(T *dest, T val) {
MinOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
void TestAtomicSum() {
cout << "TestAtomicSum" << endl;
int items = 1000;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
if (sizeof(T) == 2) {
h_a[i] = __float2half(0.0);
} else {
h_a[i] = 0.0;
}
}
GetGpuErr(cudaMalloc((void **)&d_a, bytes));
GetGpuErr(cudaMemcpy((void *)d_a, (void *)h_a, bytes, cudaMemcpyHostToDevice));
dim3 grid(1000);
dim3 block(1000);
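// Each of the 1000 blocks adds 1.0 to element threadIdx.x, so every element should equal the grid size (1000) if the atomic add is correct.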
AtomicTestSum<T><<<grid, block>>>(d_a, 1.0);
GetGpuErr(cudaPeekAtLastError());
GetGpuErr(cudaMemcpy((void *)h_a, (void *)d_a, bytes, cudaMemcpyDeviceToHost));
for (auto i = 0; i < 10; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%f ", tmp);
}
printf("\n");
GetGpuErr(cudaFree(d_a));
free(h_a);
}
template <typename T>
void TestAtomicMax() {
cout << "TestAtomicMax" << endl;
int items = 10;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
if (sizeof(T) == 2) {
h_a[i] = __float2half(i);
} else {
h_a[i] = i;
}
}
GetGpuErr(cudaMalloc((void **)&d_a, bytes));
GetGpuErr(cudaMemcpy((void *)d_a, (void *)h_a, bytes, cudaMemcpyHostToDevice));
double val = 1.234567891012345;
dim3 grid(10000);
dim3 block(items);
AtomicTestMax<T><<<grid, block>>>(d_a, val);
GetGpuErr(cudaPeekAtLastError());
GetGpuErr(cudaMemcpy((void *)h_a, (void *)d_a, bytes, cudaMemcpyDeviceToHost));
for (auto i = 0; i < 5; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%.12f ", tmp);
}
printf("\n");
GetGpuErr(cudaFree(d_a));
free(h_a);
}
template <typename T>
void TestAtomicMin() {
cout << "TestAtomicMin" << endl;
int items = 10;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
h_a[i] = __float2half(i);
}
GetGpuErr(cudaMalloc((void **)&d_a, bytes));
GetGpuErr(cudaMemcpy((void *)d_a, (void *)h_a, bytes, cudaMemcpyHostToDevice));
double val = 1.234567891012345;
dim3 grid(10000);
dim3 block(items);
AtomicTestMin<T><<<grid, block>>>(d_a, val);
GetGpuErr(cudaPeekAtLastError());
GetGpuErr(cudaMemcpy((void *)h_a, (void *)d_a, bytes, cudaMemcpyDeviceToHost));
for (auto i = 0; i < 5; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%.12f ", tmp);
}
printf("\n");
GetGpuErr(cudaFree(d_a));
free(h_a);
}
int main() {
TestAtomicSum<float>();
TestAtomicSum<double>();
TestAtomicSum<half>();
TestAtomicMax<float>();
TestAtomicMax<double>();
TestAtomicMax<half>();
TestAtomicMin<float>();
TestAtomicMin<double>();
TestAtomicMin<half>();
return 0;
}
|
55604f70de0d9542323739147cb13f08a79e9d73.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <domains/IntDomainsActions.h>
#include <utils/Utils.h>
void IntDomainsActions::initialize(int count)
{
elementsToRemove.initialize(count);
lowerbounds.initialize(count);
upperbounds.initialize(count);
#ifdef GPU
locks.initialize(count);
#endif
}
void IntDomainsActions::deinitialize()
{
for (int i = 0; i < elementsToRemove.size; i += 1)
{
elementsToRemove[i].deinitialize();
}
elementsToRemove.deinitialize();
lowerbounds.deinitialize();
upperbounds.deinitialize();
#ifdef GPU
locks.deinitialize();
#endif
}
void IntDomainsActions::push()
{
elementsToRemove.resize_by_one();
elementsToRemove.back().initialize();
lowerbounds.push_back(INT_MIN);
upperbounds.push_back(INT_MAX);
#ifdef GPU
locks.resize_by_one();
locks.back().initialize();
#endif
}
cudaDevice void IntDomainsActions::clear(int index)
{
elementsToRemove[index].clear();
lowerbounds[index] = INT_MIN;
upperbounds[index] = INT_MAX;
}
cudaDevice void IntDomainsActions::removeElement(int index, int val)
{
if (lowerbounds[index] <= val and val <= upperbounds[index])
{
#ifdef GPU
locks[index].lock();
#endif
elementsToRemove[index].push_back(val);
#ifdef GPU
locks[index].unlock();
#endif
}
}
cudaDevice void IntDomainsActions::removeAnyGreaterThan(int index, int val)
{
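// drop every value greater than val: the upper bound is tightened with atomicMin on the GPU and a plain min on the host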
#ifdef GPU
__threadfence();
atomicMin(&upperbounds[index], val);
#else
upperbounds[index] = ::min(val, upperbounds[index]);
#endif
}
cudaDevice void IntDomainsActions::removeAnyLesserThan(int index, int val)
{
#ifdef GPU
__threadfence();
atomicMax(&lowerbounds[index], val);
#else
lowerbounds[index] = ::max(val, lowerbounds[index]);
#endif
}
| 55604f70de0d9542323739147cb13f08a79e9d73.cu | #include <algorithm>
#include <domains/IntDomainsActions.h>
#include <utils/Utils.h>
void IntDomainsActions::initialize(int count)
{
elementsToRemove.initialize(count);
lowerbounds.initialize(count);
upperbounds.initialize(count);
#ifdef GPU
locks.initialize(count);
#endif
}
void IntDomainsActions::deinitialize()
{
for (int i = 0; i < elementsToRemove.size; i += 1)
{
elementsToRemove[i].deinitialize();
}
elementsToRemove.deinitialize();
lowerbounds.deinitialize();
upperbounds.deinitialize();
#ifdef GPU
locks.deinitialize();
#endif
}
void IntDomainsActions::push()
{
elementsToRemove.resize_by_one();
elementsToRemove.back().initialize();
lowerbounds.push_back(INT_MIN);
upperbounds.push_back(INT_MAX);
#ifdef GPU
locks.resize_by_one();
locks.back().initialize();
#endif
}
cudaDevice void IntDomainsActions::clear(int index)
{
elementsToRemove[index].clear();
lowerbounds[index] = INT_MIN;
upperbounds[index] = INT_MAX;
}
cudaDevice void IntDomainsActions::removeElement(int index, int val)
{
if (lowerbounds[index] <= val and val <= upperbounds[index])
{
#ifdef GPU
locks[index].lock();
#endif
elementsToRemove[index].push_back(val);
#ifdef GPU
locks[index].unlock();
#endif
}
}
cudaDevice void IntDomainsActions::removeAnyGreaterThan(int index, int val)
{
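// drop every value greater than val: the upper bound is tightened with atomicMin on the GPU and a plain min on the host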
#ifdef GPU
__threadfence();
atomicMin(&upperbounds[index], val);
#else
upperbounds[index] = std::min(val, upperbounds[index]);
#endif
}
cudaDevice void IntDomainsActions::removeAnyLesserThan(int index, int val)
{
#ifdef GPU
__threadfence();
atomicMax(&lowerbounds[index], val);
#else
lowerbounds[index] = std::max(val, lowerbounds[index]);
#endif
}
|
cc7dfa3a73cec5c5b06cbbb7bee189be60bd12ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "hipcub/hipcub.hpp"
#include "gputimer.h"
#include <hiprand/hiprand_kernel.h>
using namespace std;
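// reverse the tour segment A[i+1..j] in place (used to apply a 2-opt move)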
__device__ __host__ void reverse(int *A, int i, int j){
for(int k = i+1, l = j; k<=(i+j)/2; k++,l--){
int temp = A[k];
A[k] = A[l];
A[l] = temp;
}
}
__global__ void tsp_tpred(city *cities, int *tour,long initcost,unsigned long long *dst_tid,long cit,long itr)
{
long id,j,k;
register long change,mincost=initcost,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
if(i < cit)
{
for(k=0;k<itr;k++)
{
change = 0; cost=initcost;
j=(i+1+k)%cit;
change=distD(cities, tour[i], tour[j]) + distD(cities, tour[(i+1)%cit], tour[(j+1)%cit]) - distD(cities, tour[i], tour[(i+1)%cit]) - distD(cities, tour[j], tour[(j+1)%cit]);
cost+=change;
if(cost < mincost)
{
mincost = cost;
if(i < j)
id = i * (cit-1)+(j-1)-i*(i+1)/2;
else
id = j * (cit-1)+(i-1)-j*(j+1)/2;
}
}
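// pack (new tour length, move id) into one 64-bit key so atomicMin keeps the best improving move found by any thread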
if(mincost < initcost)
atomicMin(dst_tid, ((unsigned long long)mincost << 32) | id);
}
}
template<
int threads>
__global__ void cons_soln(ant *ants, double *total, city *cities, int n, int m){
//resetting the ants
if(threadIdx.x == 0){
for(int i = 0; i < n; ++i) ants[blockIdx.x].visited[i] = 1;
}
typedef hipcub::BlockReduce<hipcub::KeyValuePair<int, double>, threads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
//placing the ants at the initial city
if(threadIdx.x == 0){
hiprandState_t rndstate;
hiprand_init(blockIdx.x, 0, 0, &rndstate);
int start = hiprand(&rndstate) % (n);
ants[blockIdx.x].tour[0] = start;
ants[blockIdx.x].tour[n] = start;
ants[blockIdx.x].visited[start] = 0;
}
__syncthreads();
hipcub::KeyValuePair<int, double> max, t1[35];
int step = 1;
int curr;
while(step < n){
curr = ants[blockIdx.x].tour[step-1];
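// each thread scores up to 35 candidate cities (already visited ones are masked to 0); BlockReduce ArgMax picks the next city with the largest total value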
for(int i = 0; i < 35; ++i){
if(threadIdx.x*35+i < n){
t1[i].value = total[curr * n + threadIdx.x*35 + i] * ants[blockIdx.x].visited[threadIdx.x * 35 + i];
t1[i].key = threadIdx.x * 35 + i;
}
else{
t1[i].key = -1;
t1[i].value = 0;
}
}
max = BlockReduce(temp_storage).Reduce(t1, hipcub::ArgMax());
__syncthreads();
if(threadIdx.x == 0){
ants[blockIdx.x].tour[step] = max.key;
ants[blockIdx.x].visited[max.key] = false;
}
step++;
}
__syncthreads();
if(threadIdx.x == 0){
ants[blockIdx.x].tour_length = get_tour_length(ants[blockIdx.x].tour, cities, n);
}
}
int main(int argc, char **argv){
city *cities;
ant *ants;
GpuTimer kernelTime;
ant *global_best_ant;
hipMallocManaged((void**)&global_best_ant, sizeof(ant));
hipMallocManaged((void**)&(global_best_ant->tour), sizeof(int)*(n+1));
global_best_ant->tour_length = LONG_MAX;
int itter_best_pos = 0;
read_file(&cities, argc, argv);
init_ants(&ants);
double t_max,t_min;
t_max = 1/(RHO*nn(cities));
t_min = t_max/(2*n);
cout<<t_max<<"\t"<<t_min<<endl;
init_pheromone(t_max);
compute_total_info(cities);
unsigned long long *dst_tid;
hipMallocManaged((void**)&dst_tid, sizeof(unsigned long long));
for(int itter = 0; itter < 1000; ++itter){
kernelTime.Start();
hipLaunchKernelGGL(( cons_soln<1024>), dim3(m),dim3(1024), 0, 0, ants,total,cities,n,m);
hipDeviceSynchronize();
cout<<hipGetErrorString(hipGetLastError())<<endl;
//-----------------------------------parallel Two opt----------------------------------------------------------------
*dst_tid = (((unsigned long long)ants[0].tour_length+1) << 32) - 1;
long sol = (n*(n-1))/2;
long itr = floor(n/2.0);
long dst;
long previ;
int blk,thrd;
if(n<512){
blk=1;
thrd=n;
}
else{
blk=(n-1)/512+1;
thrd=512;
}
for(int i = 0; i < 200; ++i){
dst = ants[i].tour_length;
do{
previ = dst;
hipLaunchKernelGGL(( tsp_tpred), dim3(blk),dim3(thrd), 0, 0, cities, ants[i].tour, dst, dst_tid, n, itr);
hipDeviceSynchronize();
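// unpack the winning move: the high 32 bits hold the improved tour length, the low 32 bits the linear index of the (x, y) position pair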
dst = *dst_tid>>32;
int tid = *dst_tid & ((1ull << 32)-1);
int x = n-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
int y = tid-x*(n-1)+(x*(x+1)/2)+1;
if(dst < previ)
reverse(ants[i].tour, x, y);
}while(dst < previ);
ants[i].tour_length = get_tour_length(ants[i].tour, cities, n);
}
//-------------------------------------------------------------------------------------------------------------------
long min_tour = ants[itter_best_pos].tour_length;
for(int i = 0; i < m; ++i){
if(min_tour > ants[i].tour_length){
min_tour = ants[i].tour_length;
itter_best_pos = i;
}
}
if(ants[itter_best_pos].tour_length < global_best_ant->tour_length){
global_best_ant->tour_length = ants[itter_best_pos].tour_length;
for(int i = 0; i <= n; ++i){
global_best_ant->tour[i] = ants[itter_best_pos].tour[i];
}
t_max = 1.0/(RHO*global_best_ant->tour_length);
t_min = t_max/(2*n);
}
evaporation();
if(!(itter%10))
update_pheromone(*global_best_ant);
else
update_pheromone(ants[itter_best_pos]);
compute_total_info(cities);
kernelTime.Stop();
check_pheromone_limits(t_max, t_min);
double error = (global_best_ant->tour_length-optimal_solution)/(float)optimal_solution * 100;
cout<<itter<<","<<kernelTime.Elapsed()<<","<<ants[itter_best_pos].tour_length<<","<<global_best_ant->tour_length<<","<<error<<endl;
}
return 0;
}
| cc7dfa3a73cec5c5b06cbbb7bee189be60bd12ef.cu | #include "utils.h"
#include "cub/cub.cuh"
#include "gputimer.h"
#include <curand_kernel.h>
using namespace std;
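// reverse the tour segment A[i+1..j] in place (used to apply a 2-opt move)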
__device__ __host__ void reverse(int *A, int i, int j){
for(int k = i+1, l = j; k<=(i+j)/2; k++,l--){
int temp = A[k];
A[k] = A[l];
A[l] = temp;
}
}
__global__ void tsp_tpred(city *cities, int *tour,long initcost,unsigned long long *dst_tid,long cit,long itr)
{
long id,j,k;
register long change,mincost=initcost,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
if(i < cit)
{
for(k=0;k<itr;k++)
{
change = 0; cost=initcost;
j=(i+1+k)%cit;
change=distD(cities, tour[i], tour[j]) + distD(cities, tour[(i+1)%cit], tour[(j+1)%cit]) - distD(cities, tour[i], tour[(i+1)%cit]) - distD(cities, tour[j], tour[(j+1)%cit]);
cost+=change;
if(cost < mincost)
{
mincost = cost;
if(i < j)
id = i * (cit-1)+(j-1)-i*(i+1)/2;
else
id = j * (cit-1)+(i-1)-j*(j+1)/2;
}
}
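// pack (new tour length, move id) into one 64-bit key so atomicMin keeps the best improving move found by any thread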
if(mincost < initcost)
atomicMin(dst_tid, ((unsigned long long)mincost << 32) | id);
}
}
template<
int threads>
__global__ void cons_soln(ant *ants, double *total, city *cities, int n, int m){
//resetting the ants
if(threadIdx.x == 0){
for(int i = 0; i < n; ++i) ants[blockIdx.x].visited[i] = 1;
}
typedef cub::BlockReduce<cub::KeyValuePair<int, double>, threads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
//placing the ants at the initial city
if(threadIdx.x == 0){
curandState rndstate;
curand_init(blockIdx.x, 0, 0, &rndstate);
int start = curand(&rndstate) % (n);
ants[blockIdx.x].tour[0] = start;
ants[blockIdx.x].tour[n] = start;
ants[blockIdx.x].visited[start] = 0;
}
__syncthreads();
cub::KeyValuePair<int, double> max, t1[35];
int step = 1;
int curr;
while(step < n){
curr = ants[blockIdx.x].tour[step-1];
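// each thread scores up to 35 candidate cities (already visited ones are masked to 0); BlockReduce ArgMax picks the next city with the largest total value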
for(int i = 0; i < 35; ++i){
if(threadIdx.x*35+i < n){
t1[i].value = total[curr * n + threadIdx.x*35 + i] * ants[blockIdx.x].visited[threadIdx.x * 35 + i];
t1[i].key = threadIdx.x * 35 + i;
}
else{
t1[i].key = -1;
t1[i].value = 0;
}
}
max = BlockReduce(temp_storage).Reduce(t1, cub::ArgMax());
__syncthreads();
if(threadIdx.x == 0){
ants[blockIdx.x].tour[step] = max.key;
ants[blockIdx.x].visited[max.key] = false;
}
step++;
}
__syncthreads();
if(threadIdx.x == 0){
ants[blockIdx.x].tour_length = get_tour_length(ants[blockIdx.x].tour, cities, n);
}
}
int main(int argc, char **argv){
city *cities;
ant *ants;
GpuTimer kernelTime;
ant *global_best_ant;
cudaMallocManaged((void**)&global_best_ant, sizeof(ant));
cudaMallocManaged((void**)&(global_best_ant->tour), sizeof(int)*(n+1));
global_best_ant->tour_length = LONG_MAX;
int itter_best_pos = 0;
read_file(&cities, argc, argv);
init_ants(&ants);
double t_max,t_min;
t_max = 1/(RHO*nn(cities));
t_min = t_max/(2*n);
cout<<t_max<<"\t"<<t_min<<endl;
init_pheromone(t_max);
compute_total_info(cities);
unsigned long long *dst_tid;
cudaMallocManaged((void**)&dst_tid, sizeof(unsigned long long));
for(int itter = 0; itter < 1000; ++itter){
kernelTime.Start();
cons_soln<1024><<<m,1024>>>(ants,total,cities,n,m);
cudaDeviceSynchronize();
cout<<cudaGetErrorString(cudaGetLastError())<<endl;
//-----------------------------------parallel Two opt----------------------------------------------------------------
*dst_tid = (((unsigned long long)ants[0].tour_length+1) << 32) - 1;
long sol = (n*(n-1))/2;
long itr = floor(n/2.0);
long dst;
long previ;
int blk,thrd;
if(n<512){
blk=1;
thrd=n;
}
else{
blk=(n-1)/512+1;
thrd=512;
}
for(int i = 0; i < 200; ++i){
dst = ants[i].tour_length;
do{
previ = dst;
tsp_tpred<<<blk,thrd>>>(cities, ants[i].tour, dst, dst_tid, n, itr);
cudaDeviceSynchronize();
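// unpack the winning move: the high 32 bits hold the improved tour length, the low 32 bits the linear index of the (x, y) position pair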
dst = *dst_tid>>32;
int tid = *dst_tid & ((1ull << 32)-1);
int x = n-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
int y = tid-x*(n-1)+(x*(x+1)/2)+1;
if(dst < previ)
reverse(ants[i].tour, x, y);
}while(dst < previ);
ants[i].tour_length = get_tour_length(ants[i].tour, cities, n);
}
//-------------------------------------------------------------------------------------------------------------------
long min_tour = ants[itter_best_pos].tour_length;
for(int i = 0; i < m; ++i){
if(min_tour > ants[i].tour_length){
min_tour = ants[i].tour_length;
itter_best_pos = i;
}
}
if(ants[itter_best_pos].tour_length < global_best_ant->tour_length){
global_best_ant->tour_length = ants[itter_best_pos].tour_length;
for(int i = 0; i <= n; ++i){
global_best_ant->tour[i] = ants[itter_best_pos].tour[i];
}
t_max = 1.0/(RHO*global_best_ant->tour_length);
t_min = t_max/(2*n);
}
evaporation();
if(!(itter%10))
update_pheromone(*global_best_ant);
else
update_pheromone(ants[itter_best_pos]);
compute_total_info(cities);
kernelTime.Stop();
check_pheromone_limits(t_max, t_min);
double error = (global_best_ant->tour_length-optimal_solution)/(float)optimal_solution * 100;
cout<<itter<<","<<kernelTime.Elapsed()<<","<<ants[itter_best_pos].tour_length<<","<<global_best_ant->tour_length<<","<<error<<endl;
}
return 0;
}
|
7e24021bbc47592d4eb2b9621e7bba7cb3194e65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by jiashuai on 17-9-21.
//
#include <thrust/sort.h>
#include <thrust/system/hip/detail/par.h>
#include "thundersvm/kernel/smo_kernel.h"
#include <config.h>
#ifdef USE_ROCM
namespace svm_kernel {
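// block-wide tree reduction in shared memory: returns the index of the smallest element of values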
__device__ int get_block_min(const float *values, int *index) {
int tid = threadIdx.x;
index[tid] = tid;
__syncthreads();
//block size is always the power of 2
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
if (tid < offset) {
if (values[index[tid + offset]] <= values[index[tid]]) {
index[tid] = index[tid + offset];
}
}
__syncthreads();
}
return index[0];
}
__global__ void
c_smo_solve_kernel(const int *label, float_type *f_val, float_type *alpha, float_type *alpha_diff,
const int *working_set,
int ws_size,
float Cp, float Cn, const float *k_mat_rows, const float *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float *f_val2reduce = (float *) &f_idx2reduce[ws_size]; //f values used for reduction.
float *alpha_i_diff = &f_val2reduce[ws_size]; //delta alpha_i
float *alpha_j_diff = &alpha_i_diff[1];
float *kd = &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float y = label[wsi];
float f = f_val[wsi];
float a = alpha[wsi];
float aold = a;
__syncthreads();
float local_eps;
int numOfIter = 0;
while (1) {
//select fUp and fLow
if (is_I_up(a, y, Cp, Cn))
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int i = get_block_min(f_val2reduce, f_idx2reduce);
float up_value = f_val2reduce[i];
float kIwsI = k_mat_rows[row_len * i + wsi];//K[i, wsi]
__syncthreads();
if (is_I_low(a, y, Cp, Cn))
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1 = get_block_min(f_val2reduce, f_idx2reduce);
float low_value = -f_val2reduce[j1];
float local_diff = low_value - up_value;
if (numOfIter == 0) {
local_eps = max(eps, 0.1f * local_diff);
}
if (local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
if (tid == 0) {
diff[0] = local_diff;
}
break;
}
__syncthreads();
//select j2 using second order heuristic
if (-up_value > -f && (is_I_low(a, y, Cp, Cn))) {
float aIJ = kd[i] + kd[tid] - 2 * kIwsI;
float bIJ = -up_value + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2 = get_block_min(f_val2reduce, f_idx2reduce);
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? Cp - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : Cn - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
float kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
if (numOfIter > max_iter) break;
}
}
__global__ void
nu_smo_solve_kernel(const int *label, float_type *f_values, float_type *alpha, float_type *alpha_diff,
const int *working_set,
int ws_size, float C, const float *k_mat_rows, const float *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float *f_val2reduce = (float *) &f_idx2reduce[ws_size]; //f values used for reduction.
float *alpha_i_diff = &f_val2reduce[ws_size]; //delta alpha_i
float *alpha_j_diff = &alpha_i_diff[1];
float *kd = &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float y = label[wsi];
float f = f_values[wsi];
float a = alpha[wsi];
float aold = a;
__syncthreads();
float local_eps;
int numOfIter = 0;
while (1) {
//select I_up (y=+1)
if (y > 0 && a < C)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
__syncthreads();
int ip = get_block_min(f_val2reduce, f_idx2reduce);
float up_value_p = f_val2reduce[ip];
float kIpwsI = k_mat_rows[row_len * ip + wsi];//K[i, wsi]
__syncthreads();
//select I_up (y=-1)
if (y < 0 && a > 0)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int in = get_block_min(f_val2reduce, f_idx2reduce);
float up_value_n = f_val2reduce[in];
float kInwsI = k_mat_rows[row_len * in + wsi];//K[i, wsi]
__syncthreads();
//select I_low (y=+1)
if (y > 0 && a > 0)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1p = get_block_min(f_val2reduce, f_idx2reduce);
float low_value_p = -f_val2reduce[j1p];
__syncthreads();
//select I_low (y=-1)
if (y < 0 && a < C)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1n = get_block_min(f_val2reduce, f_idx2reduce);
float low_value_n = -f_val2reduce[j1n];
float local_diff = max(low_value_p - up_value_p, low_value_n - up_value_n);
if (numOfIter == 0) {
local_eps = max(eps, 0.1f * local_diff);
if (tid == 0) {
diff[0] = local_diff;
}
}
if (local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
break;
}
__syncthreads();
//select j2p using second order heuristic
if (-up_value_p > -f && y > 0 && a > 0) {
float aIJ = kd[ip] + kd[tid] - 2 * kIpwsI;
float bIJ = -up_value_p + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2p = get_block_min(f_val2reduce, f_idx2reduce);
float f_val_j2p = f_val2reduce[j2p];
__syncthreads();
//select j2n using second order heuristic
if (-up_value_n > -f && y < 0 && a < C) {
float aIJ = kd[in] + kd[tid] - 2 * kInwsI;
float bIJ = -up_value_n + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2n = get_block_min(f_val2reduce, f_idx2reduce);
int i, j2;
float up_value;
float kIwsI;
if (f_val_j2p < f_val2reduce[j2n]) {
i = ip;
j2 = j2p;
up_value = up_value_p;
kIwsI = kIpwsI;
} else {
i = in;
j2 = j2n;
kIwsI = kInwsI;
up_value = up_value_n;
}
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? C - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : C - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
float kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
if (numOfIter > max_iter) break;
}
}
void
c_smo_solve(const SyncData<int> &y, SyncData<float_type> &f_val, SyncData<float_type> &alpha,
SyncData<float_type> &alpha_diff,
const SyncData<int> &working_set, float_type Cp, float_type Cn, const SyncData<float_type> &k_mat_rows,
const SyncData<float_type> &k_mat_diag, int row_len, float_type eps, SyncData<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = ws_size * sizeof(float_type) * 3 + 2 * sizeof(float);
c_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, Cp, Cn, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
void nu_smo_solve(const SyncData<int> &y, SyncData<float_type> &f_val, SyncData<float_type> &alpha,
SyncData<float_type> &alpha_diff,
const SyncData<int> &working_set, float_type C, const SyncData<float_type> &k_mat_rows,
const SyncData<float_type> &k_mat_diag, int row_len, float_type eps, SyncData<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = ws_size * sizeof(float_type) * 3 + 2 * sizeof(float);
nu_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, C, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
__global__ void
update_f_kernel(float_type *f, int ws_size, const float_type *alpha_diff, const float_type *k_mat_rows,
int n_instances) {
//"n_instances" equals to the number of rows of the whole kernel matrix for both SVC and SVR.
KERNEL_LOOP(idx, n_instances) {//one thread to update multiple fvalues.
float_type sum_diff = 0;
for (int i = 0; i < ws_size; ++i) {
float_type d = alpha_diff[i];
if (d != 0) {
sum_diff += d * k_mat_rows[i * n_instances + idx];
}
}
f[idx] -= sum_diff;
}
}
void
update_f(SyncData<float_type> &f, const SyncData<float_type> &alpha_diff, const SyncData<float_type> &k_mat_rows,
int n_instances) {
SAFE_KERNEL_LAUNCH(update_f_kernel, f.device_data(), alpha_diff.size(), alpha_diff.device_data(),
k_mat_rows.device_data(), n_instances);
}
void sort_f(SyncData<float_type> &f_val2sort, SyncData<int> &f_idx2sort) {
thrust::sort_by_key(thrust::hip::par, f_val2sort.device_data(), f_val2sort.device_data() + f_val2sort.size(),
f_idx2sort.device_data(), thrust::less<float_type>());
}
}
#endif
| 7e24021bbc47592d4eb2b9621e7bba7cb3194e65.cu | //
// Created by jiashuai on 17-9-21.
//
#include <thrust/sort.h>
#include <thrust/system/cuda/detail/par.h>
#include "thundersvm/kernel/smo_kernel.h"
#include <config.h>
#ifdef USE_CUDA
namespace svm_kernel {
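// block-wide tree reduction in shared memory: returns the index of the smallest element of values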
__device__ int get_block_min(const float *values, int *index) {
int tid = threadIdx.x;
index[tid] = tid;
__syncthreads();
//block size is always the power of 2
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
if (tid < offset) {
if (values[index[tid + offset]] <= values[index[tid]]) {
index[tid] = index[tid + offset];
}
}
__syncthreads();
}
return index[0];
}
__global__ void
c_smo_solve_kernel(const int *label, float_type *f_val, float_type *alpha, float_type *alpha_diff,
const int *working_set,
int ws_size,
float Cp, float Cn, const float *k_mat_rows, const float *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float *f_val2reduce = (float *) &f_idx2reduce[ws_size]; //f values used for reduction.
float *alpha_i_diff = &f_val2reduce[ws_size]; //delta alpha_i
float *alpha_j_diff = &alpha_i_diff[1];
float *kd = &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float y = label[wsi];
float f = f_val[wsi];
float a = alpha[wsi];
float aold = a;
__syncthreads();
float local_eps;
int numOfIter = 0;
while (1) {
//select fUp and fLow
if (is_I_up(a, y, Cp, Cn))
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int i = get_block_min(f_val2reduce, f_idx2reduce);
float up_value = f_val2reduce[i];
float kIwsI = k_mat_rows[row_len * i + wsi];//K[i, wsi]
__syncthreads();
if (is_I_low(a, y, Cp, Cn))
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1 = get_block_min(f_val2reduce, f_idx2reduce);
float low_value = -f_val2reduce[j1];
float local_diff = low_value - up_value;
if (numOfIter == 0) {
local_eps = max(eps, 0.1f * local_diff);
}
if (local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
if (tid == 0) {
diff[0] = local_diff;
}
break;
}
__syncthreads();
//select j2 using second order heuristic
if (-up_value > -f && (is_I_low(a, y, Cp, Cn))) {
float aIJ = kd[i] + kd[tid] - 2 * kIwsI;
float bIJ = -up_value + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2 = get_block_min(f_val2reduce, f_idx2reduce);
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? Cp - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : Cn - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
float kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
if (numOfIter > max_iter) break;
}
}
__global__ void
nu_smo_solve_kernel(const int *label, float_type *f_values, float_type *alpha, float_type *alpha_diff,
const int *working_set,
int ws_size, float C, const float *k_mat_rows, const float *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float *f_val2reduce = (float *) &f_idx2reduce[ws_size]; //f values used for reduction.
float *alpha_i_diff = &f_val2reduce[ws_size]; //delta alpha_i
float *alpha_j_diff = &alpha_i_diff[1];
float *kd = &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float y = label[wsi];
float f = f_values[wsi];
float a = alpha[wsi];
float aold = a;
__syncthreads();
float local_eps;
int numOfIter = 0;
while (1) {
//select I_up (y=+1)
if (y > 0 && a < C)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
__syncthreads();
int ip = get_block_min(f_val2reduce, f_idx2reduce);
float up_value_p = f_val2reduce[ip];
float kIpwsI = k_mat_rows[row_len * ip + wsi];//K[i, wsi]
__syncthreads();
//select I_up (y=-1)
if (y < 0 && a > 0)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int in = get_block_min(f_val2reduce, f_idx2reduce);
float up_value_n = f_val2reduce[in];
float kInwsI = k_mat_rows[row_len * in + wsi];//K[i, wsi]
__syncthreads();
//select I_low (y=+1)
if (y > 0 && a > 0)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1p = get_block_min(f_val2reduce, f_idx2reduce);
float low_value_p = -f_val2reduce[j1p];
__syncthreads();
//select I_low (y=-1)
if (y < 0 && a < C)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1n = get_block_min(f_val2reduce, f_idx2reduce);
float low_value_n = -f_val2reduce[j1n];
float local_diff = max(low_value_p - up_value_p, low_value_n - up_value_n);
if (numOfIter == 0) {
local_eps = max(eps, 0.1f * local_diff);
if (tid == 0) {
diff[0] = local_diff;
}
}
if (local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
break;
}
__syncthreads();
//select j2p using second order heuristic
if (-up_value_p > -f && y > 0 && a > 0) {
float aIJ = kd[ip] + kd[tid] - 2 * kIpwsI;
float bIJ = -up_value_p + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2p = get_block_min(f_val2reduce, f_idx2reduce);
float f_val_j2p = f_val2reduce[j2p];
__syncthreads();
//select j2n using second order heuristic
if (-up_value_n > -f && y < 0 && a < C) {
float aIJ = kd[in] + kd[tid] - 2 * kInwsI;
float bIJ = -up_value_n + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2n = get_block_min(f_val2reduce, f_idx2reduce);
int i, j2;
float up_value;
float kIwsI;
if (f_val_j2p < f_val2reduce[j2n]) {
i = ip;
j2 = j2p;
up_value = up_value_p;
kIwsI = kIpwsI;
} else {
i = in;
j2 = j2n;
kIwsI = kInwsI;
up_value = up_value_n;
}
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? C - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : C - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
float kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
if (numOfIter > max_iter) break;
}
}
void
c_smo_solve(const SyncData<int> &y, SyncData<float_type> &f_val, SyncData<float_type> &alpha,
SyncData<float_type> &alpha_diff,
const SyncData<int> &working_set, float_type Cp, float_type Cn, const SyncData<float_type> &k_mat_rows,
const SyncData<float_type> &k_mat_diag, int row_len, float_type eps, SyncData<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = ws_size * sizeof(float_type) * 3 + 2 * sizeof(float);
c_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, Cp, Cn, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
void nu_smo_solve(const SyncData<int> &y, SyncData<float_type> &f_val, SyncData<float_type> &alpha,
SyncData<float_type> &alpha_diff,
const SyncData<int> &working_set, float_type C, const SyncData<float_type> &k_mat_rows,
const SyncData<float_type> &k_mat_diag, int row_len, float_type eps, SyncData<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = ws_size * sizeof(float_type) * 3 + 2 * sizeof(float);
nu_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, C, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
__global__ void
update_f_kernel(float_type *f, int ws_size, const float_type *alpha_diff, const float_type *k_mat_rows,
int n_instances) {
//"n_instances" equals to the number of rows of the whole kernel matrix for both SVC and SVR.
KERNEL_LOOP(idx, n_instances) {//one thread to update multiple fvalues.
float_type sum_diff = 0;
for (int i = 0; i < ws_size; ++i) {
float_type d = alpha_diff[i];
if (d != 0) {
sum_diff += d * k_mat_rows[i * n_instances + idx];
}
}
f[idx] -= sum_diff;
}
}
void
update_f(SyncData<float_type> &f, const SyncData<float_type> &alpha_diff, const SyncData<float_type> &k_mat_rows,
int n_instances) {
SAFE_KERNEL_LAUNCH(update_f_kernel, f.device_data(), alpha_diff.size(), alpha_diff.device_data(),
k_mat_rows.device_data(), n_instances);
}
void sort_f(SyncData<float_type> &f_val2sort, SyncData<int> &f_idx2sort) {
thrust::sort_by_key(thrust::cuda::par, f_val2sort.device_data(), f_val2sort.device_data() + f_val2sort.size(),
f_idx2sort.device_data(), thrust::less<float_type>());
}
}
#endif
|
7e37265feecba37cf4100685f983945a3fc2b930.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "BlockUtils.h"
////////////////////////////////////////////////////////////////////////////////
// copy_tex
// Copy a 1d texture to out_ptr, using 1d blocks and threads
////////////////////////////////////////////////////////////////////////////////
__global__ void copy_tex(float *out_ptr, hipTextureObject_t tex) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch<float>(tex, offset);
out_ptr[offset] = c;
}
////////////////////////////////////////////////////////////////////////////////
// TexObjFloat1D
// Wrap a 1d texture around a float array
////////////////////////////////////////////////////////////////////////////////
hipTextureObject_t *TexObjFloat1D(float *devPtr, int length)
{
// create texture object
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = devPtr;
resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = length * sizeof(float);
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
hipTextureObject_t *tex = (hipTextureObject_t *)malloc(sizeof(hipTextureObject_t));
hipCreateTextureObject(tex, &resDesc, &texDesc, NULL);
return tex;
}
////////////////////////////////////////////////////////////////////////////////
// tex_1d_2d_1d
// A 1d texture, working on a 2d torus, using 1d blocks and threads
////////////////////////////////////////////////////////////////////////////////
__global__ void tex_1d_2d_1d(float *resOut, hipTextureObject_t texIn, int2 plySize,
float speed, float decay) {
int plyLength = plySize.x * plySize.y;
int step = blockDim.x * gridDim.x;
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < plyLength; i += step)
{
int col = i % plySize.x;
int row = (i - col) / plySize.x;
int colm = (col - 1 + plySize.x) % plySize.x;
int colp = (col + 1 + plySize.x) % plySize.x;
int rowm = (row - 1 + plySize.y) % plySize.y;
int rowp = (row + 1 + plySize.y) % plySize.y;
int left = colm + row * plySize.x;
int right = colp + row * plySize.x;
int top = col + rowm * plySize.x;
int bottom = col + rowp * plySize.x;
int center = col + row * plySize.x;
float t = tex1Dfetch<float>(texIn, top);
float l = tex1Dfetch<float>(texIn, left);
float c = tex1Dfetch<float>(texIn, center);
float r = tex1Dfetch<float>(texIn, right);
float b = tex1Dfetch<float>(texIn, bottom);
resOut[center] = c + speed * (t + b + r + l - decay * c);
}
}
| 7e37265feecba37cf4100685f983945a3fc2b930.cu | #include <stdlib.h>
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "BlockUtils.h"
////////////////////////////////////////////////////////////////////////////////
// copy_tex
// Copy a 1d texture to out_ptr, using 1d blocks and threads
////////////////////////////////////////////////////////////////////////////////
__global__ void copy_tex(float *out_ptr, cudaTextureObject_t tex) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch<float>(tex, offset);
out_ptr[offset] = c;
}
////////////////////////////////////////////////////////////////////////////////
// TexObjFloat1D
// Wrap a 1d texture around a float array
////////////////////////////////////////////////////////////////////////////////
cudaTextureObject_t *TexObjFloat1D(float *devPtr, int length)
{
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = devPtr;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = length * sizeof(float);
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t *tex = (cudaTextureObject_t *)malloc(sizeof(cudaTextureObject_t));
cudaCreateTextureObject(tex, &resDesc, &texDesc, NULL);
return tex;
}
////////////////////////////////////////////////////////////////////////////////
// tex_1d_2d_1d
// A 1d texture, working on a 2d torus, using 1d blocks and threads
////////////////////////////////////////////////////////////////////////////////
__global__ void tex_1d_2d_1d(float *resOut, cudaTextureObject_t texIn, int2 plySize,
float speed, float decay) {
int plyLength = plySize.x * plySize.y;
int step = blockDim.x * gridDim.x;
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < plyLength; i += step)
{
int col = i % plySize.x;
int row = (i - col) / plySize.x;
int colm = (col - 1 + plySize.x) % plySize.x;
int colp = (col + 1 + plySize.x) % plySize.x;
int rowm = (row - 1 + plySize.y) % plySize.y;
int rowp = (row + 1 + plySize.y) % plySize.y;
int left = colm + row * plySize.x;
int right = colp + row * plySize.x;
int top = col + rowm * plySize.x;
int bottom = col + rowp * plySize.x;
int center = col + row * plySize.x;
float t = tex1Dfetch<float>(texIn, top);
float l = tex1Dfetch<float>(texIn, left);
float c = tex1Dfetch<float>(texIn, center);
float r = tex1Dfetch<float>(texIn, right);
float b = tex1Dfetch<float>(texIn, bottom);
resOut[center] = c + speed * (t + b + r + l - decay * c);
}
}
|
7e496511766842d393bce13ba491a816bd347f9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T>
__global__ void
_ReduceLossGrad(const int nthreads, const T scale, const T* dy, T* dx) {
#if __CUDA_ARCH__ >= 350
const T alpha = __ldg(dy) * scale;
#else
const T alpha = dy[0] * scale;
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] *= alpha;
}
}
__global__ void _ReduceLossGrad(
const int nthreads,
const float scale,
const half* dy,
half* dx) {
#if __CUDA_ARCH__ >= 350
const float alpha = __half2float(__ldg(dy)) * scale;
#else
const float alpha = __half2float(dy[0]) * scale;
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] = __float2half(__half2float(dx[i]) * alpha);
}
}
template <typename T>
__global__ void
_ReduceLossGrad(const int nthreads, const T* normalizer, const T* dy, T* dx) {
#if __CUDA_ARCH__ >= 350
const T alpha = __ldg(dy) / max(__ldg(normalizer), T(1));
#else
const T alpha = dy[0] / max(normalizer[0], T(1));
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] *= alpha;
}
}
template <>
__global__ void _ReduceLossGrad<half>(
const int nthreads,
const half* normalizer,
const half* dy,
half* dx) {
#if __CUDA_ARCH__ >= 350
const float alpha =
__half2float(__ldg(dy)) / max(__half2float(__ldg(normalizer)), 1.f);
#else
const float alpha =
__half2float(dy[0]) / max(__half2float(normalizer[0]), 1.f);
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] = __float2half(__half2float(dx[i]) * alpha);
}
}
template <typename T>
__global__ void _BroadcastLossGrad(
const int nthreads,
const int dim1,
const int dim2,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
#if __CUDA_ARCH__ >= 350
dx[i] *= __ldg(dy + (i / dim1) * dim2 + (i % dim2));
#else
dx[i] *= dy[(i / dim1) * dim2 + (i % dim2)];
#endif
}
}
template <>
__global__ void _BroadcastLossGrad<half>(
const int nthreads,
const int dim1,
const int dim2,
const half* dy,
half* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
#if __CUDA_ARCH__ >= 350
dx[i] = __float2half(
__half2float(dx[i]) *
__half2float(__ldg(dy + (i / dim1) * dim2 + (i % dim2))));
#else
dx[i] = __float2half(
__half2float(dx[i]) * __half2float(dy[(i / dim1) * dim2 + (i % dim2)]));
#endif
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void ReduceLossGrad<float16, CUDAContext>(
const int count,
const int num_masks,
const float normalizer,
const float16* dy,
const float16* mask,
float16* dx,
CUDAContext* ctx) {
if (num_masks > 0 && normalizer < 0.f) {
auto* normalizer_v2 = const_cast<float16*>(mask + num_masks);
math::Sum(num_masks, 1.f, mask, normalizer_v2, ctx);
hipLaunchKernelGGL(( _ReduceLossGrad),
dim3(CUDA_BLOCKS(count)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(),
count,
reinterpret_cast<const half*>(normalizer_v2),
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
} else {
hipLaunchKernelGGL(( _ReduceLossGrad),
dim3(CUDA_BLOCKS(count)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(),
count,
1.f / ::max(0.5f, normalizer),
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
}
template <>
void BroadcastLossGrad<float16, CUDAContext>(
const int outer_dim,
const int inner_dim,
const int axis_dim,
const float16* dy,
float16* dx,
CUDAContext* ctx) {
const auto nthreads = outer_dim * axis_dim * inner_dim;
hipLaunchKernelGGL(( _BroadcastLossGrad),
dim3(CUDA_BLOCKS(nthreads)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(),
nthreads,
axis_dim * inner_dim,
inner_dim,
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void ReduceLoss<T, CUDAContext>( \
const int count, \
const int num_masks, \
const float normalizer, \
const T* x, \
const T* mask, \
T* y, \
CUDAContext* ctx) { \
if (num_masks > 0 && normalizer < 0.f) { \
auto* normalizer_v2 = const_cast<T*>(mask + num_masks); \
math::Sum(num_masks, 1.f, mask, normalizer_v2, ctx); \
math::Sum(count, 1.f, x, y, ctx); \
math::Div(1, y, normalizer_v2, y, ctx); \
} else { \
math::Sum(count, 1.f / ::max(1.f, normalizer), x, y, ctx); \
} \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void ReduceLossGrad<T, CUDAContext>( \
const int count, \
const int num_masks, \
const float normalizer, \
const T* dy, \
const T* mask, \
T* dx, \
CUDAContext* ctx) { \
if (num_masks > 0 && normalizer < 0.f) { \
auto* normalizer_v2 = const_cast<T*>(mask + num_masks); \
math::Sum(num_masks, 1.f, mask, normalizer_v2, ctx); \
hipLaunchKernelGGL(( _ReduceLossGrad), \
CUDA_BLOCKS(count), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), count, normalizer_v2, dy, dx); \
} else { \
hipLaunchKernelGGL(( _ReduceLossGrad), \
CUDA_BLOCKS(count), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
count, T(1.f / ::max(0.5f, normalizer)), dy, dx); \
} \
} \
template <> \
void BroadcastLossGrad<T, CUDAContext>( \
const int outer_dim, \
const int inner_dim, \
const int axis_dim, \
const T* dy, \
T* dx, \
CUDAContext* ctx) { \
const auto nthreads = outer_dim * axis_dim * inner_dim; \
hipLaunchKernelGGL(( _BroadcastLossGrad), \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), \
nthreads, axis_dim * inner_dim, inner_dim, dy, dx); \
}
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| 7e496511766842d393bce13ba491a816bd347f9e.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T>
__global__ void
_ReduceLossGrad(const int nthreads, const T scale, const T* dy, T* dx) {
#if __CUDA_ARCH__ >= 350
const T alpha = __ldg(dy) * scale;
#else
const T alpha = dy[0] * scale;
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] *= alpha;
}
}
__global__ void _ReduceLossGrad(
const int nthreads,
const float scale,
const half* dy,
half* dx) {
#if __CUDA_ARCH__ >= 350
const float alpha = __half2float(__ldg(dy)) * scale;
#else
const float alpha = __half2float(dy[0]) * scale;
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] = __float2half(__half2float(dx[i]) * alpha);
}
}
template <typename T>
__global__ void
_ReduceLossGrad(const int nthreads, const T* normalizer, const T* dy, T* dx) {
#if __CUDA_ARCH__ >= 350
const T alpha = __ldg(dy) / max(__ldg(normalizer), T(1));
#else
const T alpha = dy[0] / max(normalizer[0], T(1));
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] *= alpha;
}
}
template <>
__global__ void _ReduceLossGrad<half>(
const int nthreads,
const half* normalizer,
const half* dy,
half* dx) {
#if __CUDA_ARCH__ >= 350
const float alpha =
__half2float(__ldg(dy)) / max(__half2float(__ldg(normalizer)), 1.f);
#else
const float alpha =
__half2float(dy[0]) / max(__half2float(normalizer[0]), 1.f);
#endif
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] = __float2half(__half2float(dx[i]) * alpha);
}
}
template <typename T>
__global__ void _BroadcastLossGrad(
const int nthreads,
const int dim1,
const int dim2,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
#if __CUDA_ARCH__ >= 350
dx[i] *= __ldg(dy + (i / dim1) * dim2 + (i % dim2));
#else
dx[i] *= dy[(i / dim1) * dim2 + (i % dim2)];
#endif
}
}
template <>
__global__ void _BroadcastLossGrad<half>(
const int nthreads,
const int dim1,
const int dim2,
const half* dy,
half* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
#if __CUDA_ARCH__ >= 350
dx[i] = __float2half(
__half2float(dx[i]) *
__half2float(__ldg(dy + (i / dim1) * dim2 + (i % dim2))));
#else
dx[i] = __float2half(
__half2float(dx[i]) * __half2float(dy[(i / dim1) * dim2 + (i % dim2)]));
#endif
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void ReduceLossGrad<float16, CUDAContext>(
const int count,
const int num_masks,
const float normalizer,
const float16* dy,
const float16* mask,
float16* dx,
CUDAContext* ctx) {
if (num_masks > 0 && normalizer < 0.f) {
auto* normalizer_v2 = const_cast<float16*>(mask + num_masks);
math::Sum(num_masks, 1.f, mask, normalizer_v2, ctx);
_ReduceLossGrad<<<
CUDA_BLOCKS(count),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(
count,
reinterpret_cast<const half*>(normalizer_v2),
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
} else {
_ReduceLossGrad<<<
CUDA_BLOCKS(count),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(
count,
1.f / std::max(0.5f, normalizer),
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
}
template <>
void BroadcastLossGrad<float16, CUDAContext>(
const int outer_dim,
const int inner_dim,
const int axis_dim,
const float16* dy,
float16* dx,
CUDAContext* ctx) {
const auto nthreads = outer_dim * axis_dim * inner_dim;
_BroadcastLossGrad<<<
CUDA_BLOCKS(nthreads),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(
nthreads,
axis_dim * inner_dim,
inner_dim,
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void ReduceLoss<T, CUDAContext>( \
const int count, \
const int num_masks, \
const float normalizer, \
const T* x, \
const T* mask, \
T* y, \
CUDAContext* ctx) { \
if (num_masks > 0 && normalizer < 0.f) { \
auto* normalizer_v2 = const_cast<T*>(mask + num_masks); \
math::Sum(num_masks, 1.f, mask, normalizer_v2, ctx); \
math::Sum(count, 1.f, x, y, ctx); \
math::Div(1, y, normalizer_v2, y, ctx); \
} else { \
math::Sum(count, 1.f / std::max(1.f, normalizer), x, y, ctx); \
} \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void ReduceLossGrad<T, CUDAContext>( \
const int count, \
const int num_masks, \
const float normalizer, \
const T* dy, \
const T* mask, \
T* dx, \
CUDAContext* ctx) { \
if (num_masks > 0 && normalizer < 0.f) { \
auto* normalizer_v2 = const_cast<T*>(mask + num_masks); \
math::Sum(num_masks, 1.f, mask, normalizer_v2, ctx); \
_ReduceLossGrad<<< \
CUDA_BLOCKS(count), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>(count, normalizer_v2, dy, dx); \
} else { \
_ReduceLossGrad<<< \
CUDA_BLOCKS(count), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>( \
count, T(1.f / std::max(0.5f, normalizer)), dy, dx); \
} \
} \
template <> \
void BroadcastLossGrad<T, CUDAContext>( \
const int outer_dim, \
const int inner_dim, \
const int axis_dim, \
const T* dy, \
T* dx, \
CUDAContext* ctx) { \
const auto nthreads = outer_dim * axis_dim * inner_dim; \
_BroadcastLossGrad<<< \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>( \
nthreads, axis_dim * inner_dim, inner_dim, dy, dx); \
}
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
aaf9b42d2b64354fb2242e70166cf7efe16e1c76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
/*! \file SFCPackUpdaterGPU.cu
\brief Defines GPU kernel code for generating the space-filling curve sorted order on the GPU. Used by SFCPackUpdaterGPU.
*/
#include "SFCPackUpdaterGPU.cuh"
#include "hoomd/extern/kernels/mergesort.cuh"
//! Kernel to bin particles
template<bool twod>
__global__ void gpu_sfc_bin_particles_kernel(unsigned int N,
const Scalar4 *d_pos,
unsigned int *d_particle_bins,
const unsigned int *d_traversal_order,
unsigned int n_grid,
unsigned int *d_sorted_order,
const BoxDim box)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N) return;
// fetch particle position
Scalar4 postype = d_pos[idx];
Scalar3 p = make_scalar3(postype.x, postype.y, postype.z);
Scalar3 f = box.makeFraction(p);
int ib = (unsigned int)(f.x * n_grid) % n_grid;
int jb = (unsigned int)(f.y * n_grid) % n_grid;
int kb = (unsigned int)(f.z * n_grid) % n_grid;
// if the particle is slightly outside, move back into grid
if (ib < 0) ib = 0;
if (ib >= n_grid) ib = n_grid - 1;
if (jb < 0) jb = 0;
if (jb >= n_grid) jb = n_grid - 1;
if (kb < 0) kb = 0;
if (kb >= n_grid) kb = n_grid - 1;
// record its bin
unsigned int bin;
if (twod)
{
// do not use Hilbert curve in 2D
bin = ib*n_grid + jb;
d_particle_bins[idx] = bin;
}
else
{
bin = ib*(n_grid*n_grid) + jb * n_grid + kb;
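// look up the cell's rank along the precomputed Hilbert-curve traversal so the sort follows the space-filling curve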
d_particle_bins[idx] = d_traversal_order[bin];
}
// store index of ptl
d_sorted_order[idx] = idx;
}
/*! \param N number of local particles
\param d_pos Device array of positions
\param d_particle_bins Device array of particle bins
\param d_traversal_order Device array of Hilbert-curve bins
\param n_grid Number of grid elements along one edge
\param d_sorted_order Sorted order of particles
\param box Box dimensions
\param twod If true, bin particles in two dimensions
*/
void gpu_generate_sorted_order(unsigned int N,
const Scalar4 *d_pos,
unsigned int *d_particle_bins,
unsigned int *d_traversal_order,
unsigned int n_grid,
unsigned int *d_sorted_order,
const BoxDim& box,
bool twod,
mgpu::ContextPtr mgpu_context)
{
// maybe need to autotune, but SFCPackUpdater is called infrequently
unsigned int block_size = 512;
unsigned int n_blocks = N/block_size + 1;
if (twod)
hipLaunchKernelGGL(( gpu_sfc_bin_particles_kernel<true>), dim3(n_blocks), dim3(block_size), 0, 0, N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
else
hipLaunchKernelGGL(( gpu_sfc_bin_particles_kernel<false>), dim3(n_blocks), dim3(block_size), 0, 0, N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
// Sort particles
if (N)
mgpu::MergesortPairs(d_particle_bins, d_sorted_order, N, *mgpu_context);
}
//! Kernel to apply sorted order
__global__ void gpu_apply_sorted_order_kernel(
unsigned int N,
unsigned int n_ghost,
const unsigned int *d_sorted_order,
const Scalar4 *d_pos,
Scalar4 *d_pos_alt,
const Scalar4 *d_vel,
Scalar4 *d_vel_alt,
const Scalar3 *d_accel,
Scalar3 *d_accel_alt,
const Scalar *d_charge,
Scalar *d_charge_alt,
const Scalar *d_diameter,
Scalar *d_diameter_alt,
const int3 *d_image,
int3 *d_image_alt,
const unsigned int *d_body,
unsigned int *d_body_alt,
const unsigned int *d_tag,
unsigned int *d_tag_alt,
const Scalar4 *d_orientation,
Scalar4 *d_orientation_alt,
const Scalar4 *d_angmom,
Scalar4 *d_angmom_alt,
const Scalar3 *d_inertia,
Scalar3 *d_inertia_alt,
const Scalar *d_net_virial,
Scalar *d_net_virial_alt,
unsigned int virial_pitch,
const Scalar4 *d_net_force,
Scalar4 *d_net_force_alt,
const Scalar4 *d_net_torque,
Scalar4 *d_net_torque_alt,
unsigned int *d_rtag)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N+n_ghost) return;
// apply sorted order only for local ptls
unsigned int old_idx = (idx < N ? d_sorted_order[idx] : idx);
// permute and copy over particle data
d_pos_alt[idx] = d_pos[old_idx];
d_vel_alt[idx] = d_vel[old_idx];
d_accel_alt[idx] = d_accel[old_idx];
d_charge_alt[idx] = d_charge[old_idx];
d_diameter_alt[idx] = d_diameter[old_idx];
d_image_alt[idx] = d_image[old_idx];
d_body_alt[idx] = d_body[old_idx];
unsigned int tag = d_tag[old_idx];
d_tag_alt[idx] = tag;
d_orientation_alt[idx] = d_orientation[old_idx];
d_angmom_alt[idx] = d_angmom[old_idx];
d_inertia_alt[idx] = d_inertia[old_idx];
d_net_virial_alt[0*virial_pitch+idx] = d_net_virial[0*virial_pitch+old_idx];
d_net_virial_alt[1*virial_pitch+idx] = d_net_virial[1*virial_pitch+old_idx];
d_net_virial_alt[2*virial_pitch+idx] = d_net_virial[2*virial_pitch+old_idx];
d_net_virial_alt[3*virial_pitch+idx] = d_net_virial[3*virial_pitch+old_idx];
d_net_virial_alt[4*virial_pitch+idx] = d_net_virial[4*virial_pitch+old_idx];
d_net_virial_alt[5*virial_pitch+idx] = d_net_virial[5*virial_pitch+old_idx];
d_net_force_alt[idx] = d_net_force[old_idx];
d_net_torque_alt[idx] = d_net_torque[old_idx];
if (idx < N)
{
// update rtag to point to particle position in new arrays
d_rtag[tag] = idx;
}
}
void gpu_apply_sorted_order(
unsigned int N,
unsigned int n_ghost,
const unsigned int *d_sorted_order,
const Scalar4 *d_pos,
Scalar4 *d_pos_alt,
const Scalar4 *d_vel,
Scalar4 *d_vel_alt,
const Scalar3 *d_accel,
Scalar3 *d_accel_alt,
const Scalar *d_charge,
Scalar *d_charge_alt,
const Scalar *d_diameter,
Scalar *d_diameter_alt,
const int3 *d_image,
int3 *d_image_alt,
const unsigned int *d_body,
unsigned int *d_body_alt,
const unsigned int *d_tag,
unsigned int *d_tag_alt,
const Scalar4 *d_orientation,
Scalar4 *d_orientation_alt,
const Scalar4 *d_angmom,
Scalar4 *d_angmom_alt,
const Scalar3 *d_inertia,
Scalar3 *d_inertia_alt,
const Scalar *d_net_virial,
Scalar *d_net_virial_alt,
unsigned int virial_pitch,
const Scalar4 *d_net_force,
Scalar4 *d_net_force_alt,
const Scalar4 *d_net_torque,
Scalar4 *d_net_torque_alt,
unsigned int *d_rtag
)
{
unsigned int block_size = 512;
unsigned int n_blocks = (N+n_ghost)/block_size + 1;
hipLaunchKernelGGL(( gpu_apply_sorted_order_kernel), dim3(n_blocks), dim3(block_size), 0, 0, N,
n_ghost,
d_sorted_order,
d_pos,
d_pos_alt,
d_vel,
d_vel_alt,
d_accel,
d_accel_alt,
d_charge,
d_charge_alt,
d_diameter,
d_diameter_alt,
d_image,
d_image_alt,
d_body,
d_body_alt,
d_tag,
d_tag_alt,
d_orientation,
d_orientation_alt,
d_angmom,
d_angmom_alt,
d_inertia,
d_inertia_alt,
d_net_virial,
d_net_virial_alt,
virial_pitch,
d_net_force,
d_net_force_alt,
d_net_torque,
d_net_torque_alt,
d_rtag);
}
| aaf9b42d2b64354fb2242e70166cf7efe16e1c76.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
/*! \file SFCPackUpdaterGPU.cu
\brief Defines GPU kernel code for generating the space-filling curve sorted order on the GPU. Used by SFCPackUpdaterGPU.
*/
#include "SFCPackUpdaterGPU.cuh"
#include "hoomd/extern/kernels/mergesort.cuh"
//! Kernel to bin particles
template<bool twod>
__global__ void gpu_sfc_bin_particles_kernel(unsigned int N,
const Scalar4 *d_pos,
unsigned int *d_particle_bins,
const unsigned int *d_traversal_order,
unsigned int n_grid,
unsigned int *d_sorted_order,
const BoxDim box)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N) return;
// fetch particle position
Scalar4 postype = d_pos[idx];
Scalar3 p = make_scalar3(postype.x, postype.y, postype.z);
Scalar3 f = box.makeFraction(p);
int ib = (unsigned int)(f.x * n_grid) % n_grid;
int jb = (unsigned int)(f.y * n_grid) % n_grid;
int kb = (unsigned int)(f.z * n_grid) % n_grid;
// if the particle is slightly outside, move back into grid
if (ib < 0) ib = 0;
if (ib >= n_grid) ib = n_grid - 1;
if (jb < 0) jb = 0;
if (jb >= n_grid) jb = n_grid - 1;
if (kb < 0) kb = 0;
if (kb >= n_grid) kb = n_grid - 1;
// record its bin
unsigned int bin;
if (twod)
{
// do not use Hilbert curve in 2D
bin = ib*n_grid + jb;
d_particle_bins[idx] = bin;
}
else
{
bin = ib*(n_grid*n_grid) + jb * n_grid + kb;
d_particle_bins[idx] = d_traversal_order[bin];
}
// store index of ptl
d_sorted_order[idx] = idx;
}
/*! \param N number of local particles
\param d_pos Device array of positions
\param d_particle_bins Device array of particle bins
\param d_traversal_order Device array of Hilbert-curve bins
\param n_grid Number of grid elements along one edge
\param d_sorted_order Sorted order of particles
\param box Box dimensions
\param twod If true, bin particles in two dimensions
*/
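// A brief note on the flow below (restating what the code does): gpu_sfc_bin_particles_kernel
// assigns each particle to a grid bin (remapped through d_traversal_order in 3D, plain row-major
// in 2D), and mgpu::MergesortPairs then sorts the particle indices by bin to fill d_sorted_order.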
void gpu_generate_sorted_order(unsigned int N,
const Scalar4 *d_pos,
unsigned int *d_particle_bins,
unsigned int *d_traversal_order,
unsigned int n_grid,
unsigned int *d_sorted_order,
const BoxDim& box,
bool twod,
mgpu::ContextPtr mgpu_context)
{
// maybe need to autotune, but SFCPackUpdater is called infrequently
unsigned int block_size = 512;
unsigned int n_blocks = N/block_size + 1;
if (twod)
gpu_sfc_bin_particles_kernel<true><<<n_blocks, block_size>>>(N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
else
gpu_sfc_bin_particles_kernel<false><<<n_blocks, block_size>>>(N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
// Sort particles
if (N)
mgpu::MergesortPairs(d_particle_bins, d_sorted_order, N, *mgpu_context);
}
//! Kernel to apply sorted order
__global__ void gpu_apply_sorted_order_kernel(
unsigned int N,
unsigned int n_ghost,
const unsigned int *d_sorted_order,
const Scalar4 *d_pos,
Scalar4 *d_pos_alt,
const Scalar4 *d_vel,
Scalar4 *d_vel_alt,
const Scalar3 *d_accel,
Scalar3 *d_accel_alt,
const Scalar *d_charge,
Scalar *d_charge_alt,
const Scalar *d_diameter,
Scalar *d_diameter_alt,
const int3 *d_image,
int3 *d_image_alt,
const unsigned int *d_body,
unsigned int *d_body_alt,
const unsigned int *d_tag,
unsigned int *d_tag_alt,
const Scalar4 *d_orientation,
Scalar4 *d_orientation_alt,
const Scalar4 *d_angmom,
Scalar4 *d_angmom_alt,
const Scalar3 *d_inertia,
Scalar3 *d_inertia_alt,
const Scalar *d_net_virial,
Scalar *d_net_virial_alt,
unsigned int virial_pitch,
const Scalar4 *d_net_force,
Scalar4 *d_net_force_alt,
const Scalar4 *d_net_torque,
Scalar4 *d_net_torque_alt,
unsigned int *d_rtag)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N+n_ghost) return;
// apply sorted order only for local ptls
unsigned int old_idx = (idx < N ? d_sorted_order[idx] : idx);
// permute and copy over particle data
d_pos_alt[idx] = d_pos[old_idx];
d_vel_alt[idx] = d_vel[old_idx];
d_accel_alt[idx] = d_accel[old_idx];
d_charge_alt[idx] = d_charge[old_idx];
d_diameter_alt[idx] = d_diameter[old_idx];
d_image_alt[idx] = d_image[old_idx];
d_body_alt[idx] = d_body[old_idx];
unsigned int tag = d_tag[old_idx];
d_tag_alt[idx] = tag;
d_orientation_alt[idx] = d_orientation[old_idx];
d_angmom_alt[idx] = d_angmom[old_idx];
d_inertia_alt[idx] = d_inertia[old_idx];
d_net_virial_alt[0*virial_pitch+idx] = d_net_virial[0*virial_pitch+old_idx];
d_net_virial_alt[1*virial_pitch+idx] = d_net_virial[1*virial_pitch+old_idx];
d_net_virial_alt[2*virial_pitch+idx] = d_net_virial[2*virial_pitch+old_idx];
d_net_virial_alt[3*virial_pitch+idx] = d_net_virial[3*virial_pitch+old_idx];
d_net_virial_alt[4*virial_pitch+idx] = d_net_virial[4*virial_pitch+old_idx];
d_net_virial_alt[5*virial_pitch+idx] = d_net_virial[5*virial_pitch+old_idx];
d_net_force_alt[idx] = d_net_force[old_idx];
d_net_torque_alt[idx] = d_net_torque[old_idx];
if (idx < N)
{
// update rtag to point to particle position in new arrays
d_rtag[tag] = idx;
}
}
void gpu_apply_sorted_order(
unsigned int N,
unsigned int n_ghost,
const unsigned int *d_sorted_order,
const Scalar4 *d_pos,
Scalar4 *d_pos_alt,
const Scalar4 *d_vel,
Scalar4 *d_vel_alt,
const Scalar3 *d_accel,
Scalar3 *d_accel_alt,
const Scalar *d_charge,
Scalar *d_charge_alt,
const Scalar *d_diameter,
Scalar *d_diameter_alt,
const int3 *d_image,
int3 *d_image_alt,
const unsigned int *d_body,
unsigned int *d_body_alt,
const unsigned int *d_tag,
unsigned int *d_tag_alt,
const Scalar4 *d_orientation,
Scalar4 *d_orientation_alt,
const Scalar4 *d_angmom,
Scalar4 *d_angmom_alt,
const Scalar3 *d_inertia,
Scalar3 *d_inertia_alt,
const Scalar *d_net_virial,
Scalar *d_net_virial_alt,
unsigned int virial_pitch,
const Scalar4 *d_net_force,
Scalar4 *d_net_force_alt,
const Scalar4 *d_net_torque,
Scalar4 *d_net_torque_alt,
unsigned int *d_rtag
)
{
unsigned int block_size = 512;
unsigned int n_blocks = (N+n_ghost)/block_size + 1;
gpu_apply_sorted_order_kernel<<<n_blocks, block_size>>>(N,
n_ghost,
d_sorted_order,
d_pos,
d_pos_alt,
d_vel,
d_vel_alt,
d_accel,
d_accel_alt,
d_charge,
d_charge_alt,
d_diameter,
d_diameter_alt,
d_image,
d_image_alt,
d_body,
d_body_alt,
d_tag,
d_tag_alt,
d_orientation,
d_orientation_alt,
d_angmom,
d_angmom_alt,
d_inertia,
d_inertia_alt,
d_net_virial,
d_net_virial_alt,
virial_pitch,
d_net_force,
d_net_force_alt,
d_net_torque,
d_net_torque_alt,
d_rtag);
}
|
288c1d1830911cf556bee2d3dde34069f5ba0341.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <thrust/sort.h>
//Only include parameters file if we're not creating the shared library
#ifndef PYTHON
#include "params.h"
#endif
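// Summary of the smoother interface used by the kernels below (inferred from the code):
//   n       : number of (x,y) observations, ordered by x
//   x, y, w : abscissa, ordinate and weight arrays
//   span    : fraction of the n points covered by the smoothing window
//   iper    : periodicity flag; |iper|==2 treats x as periodic on (0,1),
//             a negative value skips filling the cross-validation output
//   vsmlsq  : small variance floor guarding the slope division
//   smo     : smoothed ordinate values (output)
//   acvr    : absolute cross-validated residuals (output, filled only when iper>0)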
__device__ int smoothkernel(int n, DTYPE * x, DTYPE * y, DTYPE * w, DTYPE span, int iper, DTYPE vsmlsq, DTYPE * smo, DTYPE * acvr)
{
int i,j,jper,in,out,ibw,it; //j0,
DTYPE xto,xti;
DTYPE wt,fbo,fbw=0.,xm=0.,ym=0.,tmp,var=0.,cvar=0.,a,h; //,sy
jper=abs(iper);
ibw=0.5*span*n+0.5;
if (ibw<2) ibw=2;
it=2*ibw+1;
for (i=0;i<it;i++) {
j=i;
if (jper==2) j=i-ibw-1;
if (j<0) {
j+=n;
xti=x[j]-1.0;
} else xti=x[j];
wt=w[j];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[j])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[j]-ym);
}
}
for (j=0;j<n;j++) {
out=j-ibw-1;
in=j+ibw;
if (jper==2 || (out>=0 && in<n)) {
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
wt=w[out];
fbo=fbw;
fbw-=wt;
if (fbw>0) {
tmp=fbo*wt*(xto-xm)/fbw;
var-=tmp*(xto-xm);
cvar-=tmp*(y[out]-ym);
}
if (fbw>0) {
xm=(fbo*xm-wt*xto)/fbw;
ym=(fbo*ym-wt*y[out])/fbw;
}
wt=w[in];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[in])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[in]-ym);
}
}
a=0.0;
if (var>vsmlsq) a=cvar/var;
smo[j]=a*(x[j]-xm)+ym;
if (iper>0) {
h=0.0;
if (fbw>0) h=1.0/fbw;
if (var>vsmlsq) h+=(x[j]-xm)*(x[j]-xm)/var;
acvr[j]=0.0;
a=1.0-w[j]*h;
if (a>0) acvr[j]=fabs(y[j]-smo[j])/a;
else if (j>0) acvr[j]=acvr[j-1];
}
}
//Nat: can rm -- to deal with equal time values
// for (j=0;j<n;j++) {
// sy=smo[j]*w[j];
// fbw=w[j];
// j0=j;
// while (j<n-1 && x[j+1]<=x[j]) {
// j+=1;
// sy+=w[j]*smo[j];
// fbw+=w[j];
// }
// if (j>j0) {
// a=0.0;
// if (fbw>0) a=sy/fbw;
// for (i=j0;i<=j;i++) smo[i]=a;
// }
// }
return 0;
}
//Copied/pasted comments from original fortran code by Friedman
// input:
// n : number of observations (x,y - pairs).
// x(n) : ordered abscissa values.
// y(n) : corresponding ordinate (response) values.
// w(n) : weight for each (x,y) observation.
// iper : periodic variable flag.
// iper=1 => x is ordered interval variable.
// iper=2 => x is a periodic variable with values
// in the range (0.0,1.0) and period 1.0.
// span : smoother span (fraction of observations in window).
// span=0.0 => automatic (variable) span selection.
// alpha : controls the high frequency (small span) penalty
// used with automatic span selection (bass tone control).
// (alpha.le.0.0 or alpha.gt.10.0 => no effect.)
// output:
// smo(n) : smoothed ordinate (response) values.
// scratch:
// sc(n,8) : internal working storage.
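// Layout of the 8 scratch columns of sc used below (each of length n, one block per frequency/thread):
//   columns 0,2,4 : smoothed fits at the tweeter/midrange/woofer spans
//   columns 1,3,5 : smoothed cross-validated residuals for those spans
//   column  6     : raw cv residuals, later the per-point selected span
//   column  7     : extra working storage for the residual smooths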
__global__ void supsmukernel(const int numThreads, const int n, const int iper, const DTYPE span, const DTYPE alpha, DTYPE * smo,
DTYPE * sc, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numThreads)
{
return;
}
const unsigned int dataOffset=tid*n;
const unsigned int scOffset=tid*n*8;
//global memory
 // pointers to the time, data and weight arrays at this frequency's offset
DTYPE * x=t1_sortby_argkeys+dataOffset;
DTYPE * y=data_sortby_argkeys+dataOffset;
DTYPE * w=weights_sortby_argkeys+dataOffset;
DTYPE * smo_thread=smo+dataOffset;
DTYPE * sc_thread=sc+scOffset;
 //Thread-local working variables (kept in registers, not shared memory)
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
// spans to be estimated: tweeter, midrange, and woofer
DTYPE spans[] = {0.05,0.2,0.5};
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo_thread[j] = a;
return;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
//Nat: rm-- should never be entered
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo_thread,sc_thread); // fixed span
return;
}
// if we made it here, the span will be estimated and variable
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc_thread+2*i*n,sc_thread+6*n);
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+(2*i+1)*n,sc_thread+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc_thread[j+(2*i+1)*n]<resmin) {
resmin=sc_thread[j+(2*i+1)*n];
sc_thread[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc_thread[j+5*n] && resmin>0) {
tmp = resmin/sc_thread[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc_thread[j+6*n]+=(spans[2]-sc_thread[j+6*n])*pow(tmp,10.0-alpha);
}
}
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+n,sc_thread+7*n);
for (j=0;j<n;j++) {
if (sc_thread[j+n]<=spans[0]) sc_thread[j+n]=spans[0];
if (sc_thread[j+n]>=spans[2]) sc_thread[j+n]=spans[2];
f=sc_thread[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j];
} else {
f/=spans[2]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j+4*n];
}
}
smoothkernel (n,x,sc_thread+3*n,w,spans[0],-jper,vsmlsq,smo_thread,sc_thread+7*n);
return;
}
//Copied/pasted comments from original fortran code by Friedman
// input:
// n : number of observations (x,y - pairs).
// x(n) : ordered abscissa values.
// y(n) : corresponding ordinate (response) values.
// w(n) : weight for each (x,y) observation.
// iper : periodic variable flag.
// iper=1 => x is ordered interval variable.
// iper=2 => x is a periodic variable with values
// in the range (0.0,1.0) and period 1.0.
// span : smoother span (fraction of observations in window).
// span=0.0 => automatic (variable) span selection.
// alpha : controls the high frequency (small span) penalty
// used with automatic span selection (bass tone control).
// (alpha.le.0.0 or alpha.gt.10.0 => no effect.)
// output:
// smo(n) : smoothed ordinate (response) values.
// scratch:
// sc(n,8) : internal working storage.
__global__ void supsmukernelOneThread (const int n, DTYPE * x, DTYPE * y, DTYPE * w, const int iper, const DTYPE span, const DTYPE alpha, DTYPE * smo, DTYPE * sc)
{
// sc is scratch space (8,n)
// output is smo: smoothed version of y
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
// spans to be estimated: tweeter, midrange, and woofer
DTYPE spans[] = {0.05,0.2,0.5};
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo[j] = a;
return;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
//Nat: can be removed
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo,sc); // fixed span
return;
}
// if we made it here, the span will be estimated and variable
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc+2*i*n,sc+6*n);
smoothkernel (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+(2*i+1)*n,sc+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc[j+(2*i+1)*n]<resmin) {
resmin=sc[j+(2*i+1)*n];
sc[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc[j+5*n] && resmin>0) {
tmp = resmin/sc[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc[j+6*n]+=(spans[2]-sc[j+6*n])*pow(tmp,10.0-alpha);
}
}
smoothkernel (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+n,sc+7*n);
for (j=0;j<n;j++) {
if (sc[j+n]<=spans[0]) sc[j+n]=spans[0];
if (sc[j+n]>=spans[2]) sc[j+n]=spans[2];
f=sc[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j];
} else {
f/=spans[2]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j+4*n];
}
}
smoothkernel (n,x,sc+3*n,w,spans[0],-jper,vsmlsq,smo,sc+7*n);
return;
}
//All SM synchronization must occur before and after the function
__forceinline__ __device__ void parReductionMaximumPowerinSM(DTYPE maxPowerForComputingPeriod[], unsigned int maxPowerIdxForComputingPeriod[])
{
int i = blockDim.x / 2;
while (i != 0) {
if(threadIdx.x < i && maxPowerForComputingPeriod[threadIdx.x + i] > maxPowerForComputingPeriod[threadIdx.x])
{
maxPowerForComputingPeriod[threadIdx.x] = maxPowerForComputingPeriod[threadIdx.x + i];
maxPowerIdxForComputingPeriod[threadIdx.x] = maxPowerIdxForComputingPeriod[threadIdx.x + i];
}
__syncthreads();
i/=2;
}
}
//one small block computes a single frequency
//DEPRECATED: This kernel design does not perform well (worse than the global memory kernel and the SM kernel one thread per freq)
/*
__global__ void supsmukernelSMOneFreqBlock(const int n, const int iper, const DTYPE span, const DTYPE alpha, DTYPE * smo,
DTYPE * sc, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int i,j,jper;
DTYPE sw,sy,a,resmin,tmp,f;
//passed in at runtime through kernel
//space for x, y, and w arrays in SM
extern __shared__ DTYPE xyw[];
__shared__ DTYPE * x;
__shared__ DTYPE * y;
__shared__ DTYPE * w;
__shared__ DTYPE * sc_thread;
__shared__ unsigned int dataOffset;
__shared__ DTYPE spans[3];
__shared__ DTYPE vsmlsq;
__shared__ DTYPE scale;
DTYPE * smo_thread;
if(threadIdx.x==0)
{
 //Compiling with -std=c++14
 //reports that this causes an invalid bitcast?
x=xyw;
y=xyw+(n);
w=xyw+(2*n);
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
dataOffset=blockIdx.x*n;
smo_thread=smo+dataOffset;
sc_thread=sc+blockIdx.x*n*8;
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
}
__syncthreads();
for (int i=0; i<n && ((i+threadIdx.x)<n); i+=blockDim.x)
{
int idx=i+threadIdx.x;
x[idx]=t1_sortby_argkeys[dataOffset+idx];
y[idx]=data_sortby_argkeys[dataOffset+idx];
w[idx]=weights_sortby_argkeys[dataOffset+idx];
}
__syncthreads();
//////////////////////////////////
//Below is the original supsmu code
//Use one thread in the block
if (threadIdx.x==0)
{
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo_thread[j] = a;
return;
}
//Nat: can be removed
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo_thread,sc_thread); // fixed span
return;
}
} //end if threadIdx.x==0
// if we made it here, the span will be estimated and variable
__syncthreads();
//original
//Use one thread in the block
if (threadIdx.x==0)
{
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc_thread+2*i*n,sc_thread+6*n);
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+(2*i+1)*n,sc_thread+7*n);
}
}
__syncthreads();
//original
// for (j=0;j<n;j++) {
// resmin=1.e20;
// for (i=0;i<3;i++) {
// if (sc_thread[j+(2*i+1)*n]<resmin) {
// resmin=sc_thread[j+(2*i+1)*n];
// sc_thread[j+6*n]=spans[i];
// }
// }
// if (alpha>0 && alpha<=10 && resmin<sc_thread[j+5*n] && resmin>0) {
// tmp = resmin/sc_thread[j+5*n];
// if (tmp<1.e-7) tmp=1.e-7;
// sc_thread[j+6*n]+=(spans[2]-sc_thread[j+6*n])*pow(tmp,10.0-alpha);
// }
// }
//parallelized with threads
for (j=0;j<n && ((j+threadIdx.x)<n);j+=blockDim.x) {
int idx=j+threadIdx.x;
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc_thread[idx+(2*i+1)*n]<resmin) {
resmin=sc_thread[idx+(2*i+1)*n];
sc_thread[idx+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc_thread[idx+5*n] && resmin>0) {
tmp = resmin/sc_thread[idx+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc_thread[idx+6*n]+=(spans[2]-sc_thread[idx+6*n])*pow(tmp,10.0-alpha);
}
}
__syncthreads();
//one thread
if (threadIdx.x==0)
{
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+n,sc_thread+7*n);
}
__syncthreads();
//original
// for (j=0;j<n;j++) {
// if (sc_thread[j+n]<=spans[0]) sc_thread[j+n]=spans[0];
// if (sc_thread[j+n]>=spans[2]) sc_thread[j+n]=spans[2];
// f=sc_thread[j+n]-spans[1];
// if (f<0) {
// f/=spans[0]-spans[1];
// sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j];
// } else {
// f/=spans[2]-spans[1];
// sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j+4*n];
// }
// }
//parallelized with threads
for (j=0;j<n && ((j+threadIdx.x)<n);j+=blockDim.x) {
// for (j=0;j<n;j++) {
int idx=j+threadIdx.x;
if (sc_thread[idx+n]<=spans[0]) sc_thread[idx+n]=spans[0];
if (sc_thread[idx+n]>=spans[2]) sc_thread[idx+n]=spans[2];
f=sc_thread[idx+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc_thread[idx+3*n]=(1.0-f)*sc_thread[idx+2*n]+f*sc_thread[idx];
} else {
f/=spans[2]-spans[1];
sc_thread[idx+3*n]=(1.0-f)*sc_thread[idx+2*n]+f*sc_thread[idx+4*n];
}
}
__syncthreads();
//only one thread computes this
if(threadIdx.x==0)
{
smoothkernel (n,x,sc_thread+3*n,w,spans[0],-jper,vsmlsq,smo_thread,sc_thread+7*n);
}
return;
}
*/
//kernel gets called after the main kernel
//some number of threads per frequency
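// Each frequency accumulates chi2 = sum(w*(y-smo)^2)/n over its n points, with
// numThreadsPerFreq threads per frequency combining partial sums through the
// shared-memory atomicAdd below. The reported periodogram power is
//   pgram = 0.5*n*(chi0-chi2)/chi0
// where chi0 is the reference chi-squared passed in by the caller (presumably
// that of the unsmoothed data).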
__global__ void computePgramReduction(const int batchwriteoffset, const int numThreadsPerFreq, const DTYPE chi0, const int n, const int numFreq, DTYPE * smo, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * pgram)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
extern __shared__ DTYPE globalSum[];
int freqNum=i/numThreadsPerFreq;
int threadInFreq=i%numThreadsPerFreq;
int freqInBlock=threadIdx.x/numThreadsPerFreq;
DTYPE localSum=0;
if(i<(numFreq*numThreadsPerFreq))
{
if (threadInFreq==0)
{
globalSum[freqInBlock]=0;
}
} //end if(i<(numFreq*numThreadsPerFreq))
__syncthreads();
if(i<(numFreq*numThreadsPerFreq))
{
int idxmin=(freqNum*n);
int idxmax=(idxmin+n);
for (int k=idxmin; k<idxmax; k+=numThreadsPerFreq){
// int idx=(freqNum*n)+(k+threadInFreq);
int idx=k+threadInFreq;
if (idx<idxmax)
{
localSum+=((data_sortby_argkeys[idx]-smo[idx])*(data_sortby_argkeys[idx]-smo[idx]))*weights_sortby_argkeys[idx];
}
}
}
if(i<(numFreq*numThreadsPerFreq))
{
atomicAdd(&globalSum[freqInBlock],localSum);
}
__syncthreads();
if (threadInFreq==0 && i<(numFreq*numThreadsPerFreq))
{
// chi2[batchwriteoffset+freqNum]=globalSum[freqInBlock]/(n*1.0);
// pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2[batchwriteoffset+freqNum])*n)/chi0;
DTYPE chi2=globalSum[freqInBlock]/(n*1.0);
pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2)*n)/chi0;
}
}
//kernel gets called after the main kernel
//some number of threads per frequency
//data, weights reordered for coalesced memory accesses
// x[j] -> x[freqNum+(numFreq*j)]
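// e.g. with numFreq=1000, point j=7 of frequency 3 is read from index
// 3 + 1000*7 = 7003, so consecutive threads (frequencies) touch consecutive addresses.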
__global__ void computePgramReductionCoalesced(const int batchwriteoffset, const int numThreadsPerFreq, const DTYPE chi0, const int n, const int numFreq, DTYPE * smo, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * pgram)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
extern __shared__ DTYPE globalSum[];
int freqNum=i/numThreadsPerFreq;
int threadInFreq=i%numThreadsPerFreq;
int freqInBlock=threadIdx.x/numThreadsPerFreq;
DTYPE localSum=0;
if(i<(numFreq*numThreadsPerFreq))
{
if (threadInFreq==0)
{
globalSum[freqInBlock]=0;
}
} //end if(i<(numFreq*numThreadsPerFreq))
__syncthreads();
if(i<(numFreq*numThreadsPerFreq))
{
int idxmin=(freqNum*n);
int idxmax=(idxmin+n);
for (int k=idxmin; k<idxmax; k+=numThreadsPerFreq){
int idx=k+threadInFreq;
      int idxCoalesced=freqNum+(numFreq*(idx-idxmin)); //idx-idxmin because it's one big long array, not an array with n elements
if (idx<idxmax)
{
localSum+=((data_sortby_argkeys[idxCoalesced]-smo[idx])*(data_sortby_argkeys[idxCoalesced]-smo[idx]))*weights_sortby_argkeys[idxCoalesced];
}
}
}
if(i<(numFreq*numThreadsPerFreq))
{
atomicAdd(&globalSum[freqInBlock],localSum);
}
__syncthreads();
if (threadInFreq==0 && i<(numFreq*numThreadsPerFreq))
{
// chi2[batchwriteoffset+freqNum]=globalSum[freqInBlock]/(n*1.0);
// pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2[batchwriteoffset+freqNum])*n)/chi0;
DTYPE chi2=globalSum[freqInBlock]/(n*1.0);
pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2)*n)/chi0;
}
}
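// Phase-folds each time value at its trial period: one thread handles one
// (point, frequency) pair, computing p = 1/(minFreq + deltaf*(freqOffset+freqNum))
// and t1 = fmod(tt, p)/p, i.e. the phase in [0,1). freqOffset shifts into the
// frequency grid (used when the grid is processed in batches).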
__global__ void computePeriodModFOneThreadPerUpdate(const int n, const int numFreq, const DTYPE minFreq, const uint64_t freqOffset, const DTYPE deltaf, DTYPE * t1, DTYPE * tt)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
int freqNum=i/n;
DTYPE p=1.0/((minFreq)+(deltaf*(freqOffset+freqNum)));
t1[i]=fmod(tt[i%n],p)/p;
}
__global__ void initializeKeyArraysOneThreadPerUpdate(const int n, const int numFreq, int * argkeys, int * freqId)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
int freqNum=i/n;
//iota
argkeys[i]=i%n;
//same frequency id for the freqId array
freqId[i]=freqNum;
}
__global__ void mapUsingArgKeysOneThreadPerUpdate(const int n, const int numFreq, int * argkeys, DTYPE * data, DTYPE * weights, DTYPE * t1, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
//t1 has already been sorted. Only make a copy.
t1_sortby_argkeys[i]=t1[i];
//map between t1 argkeys and data and weights
data_sortby_argkeys[i]=data[argkeys[i]];
weights_sortby_argkeys[i]=weights[argkeys[i]];
}
//used for coalesced memory mapping
__global__ void mapUsingArgKeysOneThreadPerUpdateAndReorderCoalesced(const int n, const int numFreq, int * argkeys, DTYPE * data, DTYPE * weights, DTYPE * t1, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
const int idxInFreq=(i%n);
const int freqNum=i/n;
const int idxWrite=(idxInFreq*numFreq)+freqNum;
//t1 has already been sorted. Only make a copy.
// t1_sortby_argkeys[idxWrite]=t1[argkeys[i]];
t1_sortby_argkeys[idxWrite]=t1[i];
data_sortby_argkeys[idxWrite]=data[argkeys[i]];
weights_sortby_argkeys[idxWrite]=weights[argkeys[i]];
}
//Uses SM but uses one thread per frequency
__global__ void supsmukernelSMOneThreadPerFreq(const int numFreq, const int n, const int iper, const DTYPE span, const DTYPE alpha,
DTYPE * smo, DTYPE * sc, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
unsigned int dataOffset=tid*n;
unsigned int scOffset=tid*n*8;
DTYPE * smo_thread=smo+dataOffset;
DTYPE * sc_thread=sc+scOffset;
//size passed in at runtime through kernel
extern __shared__ DTYPE xyw[];
__shared__ DTYPE spans[3];
DTYPE * x=xyw+(threadIdx.x*3*n);
DTYPE * y=xyw+(threadIdx.x*3*n)+(n);
DTYPE * w=xyw+(threadIdx.x*3*n)+(2*n);
//One thread copies the spans into SM
if(threadIdx.x==0)
{
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
}
__syncthreads();
if(tid<numFreq)
{
for (int i=0; i<n; i++)
{
x[i]=t1_sortby_argkeys[dataOffset+i];
y[i]=data_sortby_argkeys[dataOffset+i];
w[i]=weights_sortby_argkeys[dataOffset+i];
}
}
//////////////////////////////////
//Below is the original supsmu code
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
if(tid<numFreq)
{
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo_thread[j] = a;
return;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo_thread,sc_thread); // fixed span
return;
}
//Nat: if we made it here, the span will be estimated and variable
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc_thread+2*i*n,sc_thread+6*n);
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+(2*i+1)*n,sc_thread+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc_thread[j+(2*i+1)*n]<resmin) {
resmin=sc_thread[j+(2*i+1)*n];
sc_thread[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc_thread[j+5*n] && resmin>0) {
tmp = resmin/sc_thread[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc_thread[j+6*n]+=(spans[2]-sc_thread[j+6*n])*pow(tmp,10.0-alpha);
}
}
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+n,sc_thread+7*n);
for (j=0;j<n;j++) {
if (sc_thread[j+n]<=spans[0]) sc_thread[j+n]=spans[0];
if (sc_thread[j+n]>=spans[2]) sc_thread[j+n]=spans[2];
f=sc_thread[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j];
} else {
f/=spans[2]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j+4*n];
}
}
smoothkernel (n,x,sc_thread+3*n,w,spans[0],-jper,vsmlsq,smo_thread,sc_thread+7*n);
}//end the if(tid<numFreq) around everything
return;
}
//Smooth() function for single pass
//Nat's updated function
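// Single-pass variant of the span selection above: the running means, variances
// and covariances for all three candidate bandwidths (ibw) are updated together
// in one sweep over the points; at each point the bandwidth with the smallest
// cross-validated chi2 is picked and the estimate is blended between the
// neighbouring spans, avoiding the repeated smoothkernel() passes.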
__device__ void smoothSinglePassCoalesced(const int n, const int freqNum, const int numFreq, int * ibw, DTYPE * x, DTYPE * y, DTYPE * w, const DTYPE vsmlsq, const int alpha, DTYPE * smo)
{
int i,j,in,out;
DTYPE wt,xto,xti,yto,yti,ibwb,a,f,chi2,chi2m,tmp,fbo,vary=0.0;
// DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
//Original: if arrays stored in registers
DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
const int offset=0;
// //end original if using arrays in registers
 //If SINGLEPASSMODE==0 or SINGLEPASSMODE==2; when SINGLEPASSMODE==1 there is no need to multiply by SMALLBLOCKSIZE
//If using SM for these arrays
// __shared__ DTYPE fbw[SMALLBLOCKSIZE*3];
// __shared__ DTYPE xm[SMALLBLOCKSIZE*3];
// __shared__ DTYPE ym[SMALLBLOCKSIZE*3];
// __shared__ DTYPE smo0[SMALLBLOCKSIZE*3];
// __shared__ DTYPE var[SMALLBLOCKSIZE*3];
// __shared__ DTYPE cvar[SMALLBLOCKSIZE*3];
// const int offset=threadIdx.x*3;
// for (i=0; i<3; i++)
// {
// var[offset+i]=0;
// cvar[offset+i]=0;
// }
//data access transformation for coalesced memory accesses
// x[j] -> x[freqNum+(numFreq*j)]
for (i=0;i<3;i++) {
j=n-ibw[i]-1;
// xm[offset+i]=x[j]-1.0;
// ym[offset+i]=y[j];
// fbw[offset+i]=w[j];
xm[offset+i]=x[freqNum+(numFreq*j)]-1.0;
ym[offset+i]=y[freqNum+(numFreq*j)];
fbw[offset+i]=w[freqNum+(numFreq*j)];
for (j=n-ibw[i];j<n;j++) {
// xti=x[j]-1.0;
// yti=y[j];
// wt=w[j];
xti=x[freqNum+(numFreq*j)]-1.0;
yti=y[freqNum+(numFreq*j)];
wt=w[freqNum+(numFreq*j)];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
for (j=0;j<ibw[i];j++) {
// xti=x[j];
// yti=y[j];
// wt=w[j];
xti=x[freqNum+(numFreq*j)];
yti=y[freqNum+(numFreq*j)];
wt=w[freqNum+(numFreq*j)];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
}
for (j=0;j<n;j++) {
for (i=0;i<3;i++) {
out=j-ibw[i]-1;
in=j+ibw[i];
if (in>n-1) {
in-=n;
// xti=x[in]+1.0;
xti=x[freqNum+(numFreq*in)]+1.0;
// } else xti=x[in];
} else xti=x[freqNum+(numFreq*in)];
if (out<0) {
out+=n;
// xto=x[out]-1.0;
xto=x[freqNum+(numFreq*out)]-1.0;
// } else xto=x[out];
} else xto=x[freqNum+(numFreq*out)];
// yti=y[in];
// yto=y[out];
yti=y[freqNum+(numFreq*in)];
yto=y[freqNum+(numFreq*out)];
// wt=w[out];
wt=w[freqNum+(numFreq*out)];
fbo=fbw[offset+i];
fbw[offset+i]-=wt;
tmp=fbo*wt*(xto-xm[offset+i])/fbw[offset+i];
var[offset+i]-=tmp*(xto-xm[offset+i]);
cvar[offset+i]-=tmp*(yto-ym[offset+i]);
if (i==0) vary-=fbo*wt*(yto-ym[offset+0])*(yto-ym[offset+0])/fbw[offset+0];
xm[offset+i]=(fbo*xm[offset+i]-wt*xto)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]-wt*yto)/fbw[offset+i];
// wt=w[in];
wt=w[freqNum+(numFreq*in)];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
chi2m=1.e20; ibwb=ibw[2];
for (i=0;i<3;i++) {
a=0.0;
if (var[offset+i]>vsmlsq) a=cvar[offset+i]/var[offset+i];
// smo0[offset+i]=a*(x[j]-xm[offset+i])+ym[offset+i];
smo0[offset+i]=a*(x[freqNum+(numFreq*j)]-xm[offset+i])+ym[offset+i];
chi2 = vary-2*a*cvar[offset+0]+a*a*var[offset+0];
if (i>0) {
tmp = ym[offset+i]-ym[offset+0]-a*(xm[offset+i]-xm[offset+0]);
chi2 += tmp*tmp*fbw[offset+0];
}
tmp=1.0/fbw[offset+i];
// if (var[offset+i]>vsmlsq) tmp+=(x[j]-xm[offset+i])*(x[j]-xm[offset+i])/var[offset+i];
if (var[offset+i]>vsmlsq) tmp+=(x[freqNum+(numFreq*j)]-xm[offset+i])*(x[freqNum+(numFreq*j)]-xm[offset+i])/var[offset+i];
// tmp = 1.0 - w[j]*tmp;
tmp = 1.0 - w[freqNum+(numFreq*j)]*tmp;
chi2 = fabs(chi2)/(tmp*tmp);
if (chi2<chi2m) {
chi2m=chi2;
ibwb=(ibw[1]+ibw[i])/2.;
}
}
tmp = sqrt(chi2m/chi2);
if (tmp<1.e-7) tmp=1.e-7;
ibwb+=(ibw[2]-ibwb)*pow(tmp,10.-alpha);
f = ibwb-ibw[1];
if (f<0) {
f/=ibw[0]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+0];
} else {
f/=ibw[2]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+2];
}
}
}
//Smooth() function for single pass
//Nat's updated function
__device__ void smoothSinglePass(const int n, int * ibw, DTYPE * x, DTYPE * y, DTYPE * w, const DTYPE vsmlsq, const int alpha, DTYPE * smo)
{
int i,j,in,out;
DTYPE wt,xto,xti,yto,yti,ibwb,a,f,chi2,chi2m,tmp,fbo,vary=0.0;
// DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
//Original: if arrays stored in registers
DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
const int offset=0;
//end original if using arrays in registers
 //If SINGLEPASSMODE==2; otherwise there is no need to multiply by SMALLBLOCKSIZE
//If using SM for these arrays
// __shared__ DTYPE fbw[SMALLBLOCKSIZE*3];
// __shared__ DTYPE xm[SMALLBLOCKSIZE*3];
// __shared__ DTYPE ym[SMALLBLOCKSIZE*3];
// __shared__ DTYPE smo0[SMALLBLOCKSIZE*3];
// __shared__ DTYPE var[SMALLBLOCKSIZE*3];
// __shared__ DTYPE cvar[SMALLBLOCKSIZE*3];
// const int offset=threadIdx.x*3;
// for (i=0; i<3; i++)
// {
// var[offset+i]=0;
// cvar[offset+i]=0;
// }
//end if using SM for these arrays
for (i=0;i<3;i++) {
j=n-ibw[i]-1;
xm[offset+i]=x[j]-1.0;
ym[offset+i]=y[j];
fbw[offset+i]=w[j];
for (j=n-ibw[i];j<n;j++) {
xti=x[j]-1.0;
yti=y[j];
wt=w[j];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
for (j=0;j<ibw[i];j++) {
xti=x[j];
yti=y[j];
wt=w[j];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
}
for (j=0;j<n;j++) {
for (i=0;i<3;i++) {
out=j-ibw[i]-1;
in=j+ibw[i];
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
yti=y[in];
yto=y[out];
wt=w[out];
fbo=fbw[offset+i];
fbw[offset+i]-=wt;
tmp=fbo*wt*(xto-xm[offset+i])/fbw[offset+i];
var[offset+i]-=tmp*(xto-xm[offset+i]);
cvar[offset+i]-=tmp*(yto-ym[offset+i]);
if (i==0) vary-=fbo*wt*(yto-ym[offset+0])*(yto-ym[offset+0])/fbw[offset+0];
xm[offset+i]=(fbo*xm[offset+i]-wt*xto)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]-wt*yto)/fbw[offset+i];
wt=w[in];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
chi2m=1.e20; ibwb=ibw[2];
for (i=0;i<3;i++) {
a=0.0;
if (var[offset+i]>vsmlsq) a=cvar[offset+i]/var[offset+i];
smo0[offset+i]=a*(x[j]-xm[offset+i])+ym[offset+i];
chi2 = vary-2*a*cvar[offset+0]+a*a*var[offset+0];
if (i>0) {
tmp = ym[offset+i]-ym[offset+0]-a*(xm[offset+i]-xm[offset+0]);
chi2 += tmp*tmp*fbw[offset+0];
}
tmp=1.0/fbw[offset+i];
if (var[offset+i]>vsmlsq) tmp+=(x[j]-xm[offset+i])*(x[j]-xm[offset+i])/var[offset+i];
tmp = 1.0 - w[j]*tmp;
chi2 = fabs(chi2)/(tmp*tmp);
if (chi2<chi2m) {
chi2m=chi2;
ibwb=(ibw[1]+ibw[i])/2.;
}
}
tmp = sqrt(chi2m/chi2);
if (tmp<1.e-7) tmp=1.e-7;
ibwb+=(ibw[2]-ibwb)*pow(tmp,10.-alpha);
f = ibwb-ibw[1];
if (f<0) {
f/=ibw[0]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+0];
} else {
f/=ibw[2]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+2];
}
}
}
__global__ void supsmukernelSinglePassSMOneBlockPerFreq(const int n, const DTYPE inalpha, DTYPE * smo, DTYPE * t1_sortby_argkeys,
DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
const unsigned int dataOffset=blockIdx.x*n;
DTYPE * smo_thread=smo+dataOffset;
//Shared memory
//size passed in at runtime through kernel
extern __shared__ DTYPE xyw[];
DTYPE * x=xyw;
DTYPE * y=xyw+(n);
DTYPE * w=xyw+(2*n);
//SM
__shared__ int ibw[3];
__shared__ DTYPE spans[3];
__shared__ DTYPE scale;
__shared__ DTYPE vsmlsq;
__shared__ DTYPE alpha;
for (int i=0; i<n && ((i+threadIdx.x)<n); i++)
{
int idx=i+threadIdx.x;
x[idx]=t1_sortby_argkeys[dataOffset+idx];
y[idx]=data_sortby_argkeys[dataOffset+idx];
w[idx]=weights_sortby_argkeys[dataOffset+idx];
}
__syncthreads();
//one thread in the block computes everything
if (threadIdx.x==0)
{
int i=n/4-1;
int j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
for (int i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
smoothSinglePass(n, ibw, x, y, w, vsmlsq, alpha, smo_thread);
}
return;
}
__global__ void supsmukernelSinglePassSMOneThreadPerFreq(const int numFreq, const int n, const DTYPE inalpha, DTYPE * smo,
DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numFreq)
{
return;
}
const unsigned int dataOffset=tid*n;
DTYPE * smo_thread=smo+dataOffset;
//Shared memory
//size passed in at runtime through kernel
extern __shared__ DTYPE xyw[];
DTYPE * x=xyw+(threadIdx.x*3*n);
DTYPE * y=xyw+(threadIdx.x*3*n)+(n);
DTYPE * w=xyw+(threadIdx.x*3*n)+(2*n);
for (int i=0; i<n; i++)
{
x[i]=t1_sortby_argkeys[dataOffset+i];
y[i]=data_sortby_argkeys[dataOffset+i];
w[i]=weights_sortby_argkeys[dataOffset+i];
}
//use SM
//each thread needs its own ibw
// __shared__ int ibwSM[3*SMALLBLOCKSIZE];
__shared__ int ibw[3*SMALLBLOCKSIZE];
//Constant for all frequencies
__shared__ DTYPE spans[3];
__shared__ DTYPE alpha;
int i=n/4-1;
int j=3*(i+1)-1;
DTYPE scale=x[j]-x[i];
DTYPE vsmlsq=1.e-6*scale*scale;
if (threadIdx.x==0)
{
//constant for all frequencies
alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
}
__syncthreads();
//update pointer to my ibw
// int * ibw=&ibwSM[threadIdx.x*3];
// for (int i=0;i<3;i++) {
// ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
// if (ibw[i]<2) ibw[i]=2;
// }
for (int i=0;i<3;i++) {
ibw[threadIdx.x*3+i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[threadIdx.x*3+i]<2) ibw[threadIdx.x*3+i]=2;
}
smoothSinglePass(n, &ibw[threadIdx.x*3], x, y, w, vsmlsq, alpha, smo_thread);
return;
}
//global memory baseline for singlepass
__global__ void supsmukernelSinglePassGlobalMemory(const int numFreq, const int n,
const DTYPE inalpha, DTYPE * smo, DTYPE * tt, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numFreq)
{
return;
}
const unsigned int dataOffset=tid*n;
DTYPE * smo_thread=smo+dataOffset;
 // pointers to the time, data and weight arrays at this frequency's offset
DTYPE * x=t1_sortby_argkeys+dataOffset;
DTYPE * y=data_sortby_argkeys+dataOffset;
DTYPE * w=weights_sortby_argkeys+dataOffset;
int ibw[3];
DTYPE spans[3] = {0.05,0.2,0.5};
int i=n/4-1;
int j=3*(i+1)-1;
DTYPE scale=x[j]-x[i];
DTYPE vsmlsq=1.e-6*scale*scale;
DTYPE alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
for (int i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
smoothSinglePass(n, ibw, x, y, w, vsmlsq, alpha, smo_thread);
return;
}
//global memory for singlepass -- coalesced memory accesses
__global__ void supsmukernelSinglePassGlobalMemoryCoalesced(const int numFreq, const int n,
const DTYPE inalpha, DTYPE * smo, DTYPE * tt, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numFreq)
{
return;
}
const unsigned int dataOffset=tid*n;
DTYPE * smo_thread=smo+dataOffset;
//original in registers
int ibw[3];
DTYPE spans[3] = {0.05,0.2,0.5};
//end original in registers
//shared memory
// __shared__ DTYPE spans[3];
// __shared__ int ibwSM[3*SMALLBLOCKSIZE];
// int * ibw=&ibwSM[threadIdx.x*3];
// if (threadIdx.x==0)
// {
// spans[0]=0.05;
// spans[1]=0.2;
// spans[2]=0.5;
// }
// __syncthreads();
//end shared memory
int i=n/4-1;
int j=3*(i+1)-1;
// DTYPE scale=x[j]-x[i];
DTYPE scale=t1_sortby_argkeys[tid+(numFreq*j)]-t1_sortby_argkeys[tid+(numFreq*i)];
DTYPE vsmlsq=1.e-6*scale*scale;
DTYPE alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
for (int i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
smoothSinglePassCoalesced(n, tid, numFreq, ibw, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, vsmlsq, alpha, smo_thread);
return;
}
| 288c1d1830911cf556bee2d3dde34069f5ba0341.cu | #include "kernel.h"
#include <thrust/sort.h>
//Only include parameters file if we're not creating the shared library
#ifndef PYTHON
#include "params.h"
#endif
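// Summary of the smoother interface used by the kernels below (inferred from the code):
//   n       : number of (x,y) observations, ordered by x
//   x, y, w : abscissa, ordinate and weight arrays
//   span    : fraction of the n points covered by the smoothing window
//   iper    : periodicity flag; |iper|==2 treats x as periodic on (0,1),
//             a negative value skips filling the cross-validation output
//   vsmlsq  : small variance floor guarding the slope division
//   smo     : smoothed ordinate values (output)
//   acvr    : absolute cross-validated residuals (output, filled only when iper>0)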
__device__ int smoothkernel(int n, DTYPE * x, DTYPE * y, DTYPE * w, DTYPE span, int iper, DTYPE vsmlsq, DTYPE * smo, DTYPE * acvr)
{
int i,j,jper,in,out,ibw,it; //j0,
DTYPE xto,xti;
DTYPE wt,fbo,fbw=0.,xm=0.,ym=0.,tmp,var=0.,cvar=0.,a,h; //,sy
jper=abs(iper);
ibw=0.5*span*n+0.5;
if (ibw<2) ibw=2;
it=2*ibw+1;
for (i=0;i<it;i++) {
j=i;
if (jper==2) j=i-ibw-1;
if (j<0) {
j+=n;
xti=x[j]-1.0;
} else xti=x[j];
wt=w[j];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[j])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[j]-ym);
}
}
for (j=0;j<n;j++) {
out=j-ibw-1;
in=j+ibw;
if (jper==2 || (out>=0 && in<n)) {
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
wt=w[out];
fbo=fbw;
fbw-=wt;
if (fbw>0) {
tmp=fbo*wt*(xto-xm)/fbw;
var-=tmp*(xto-xm);
cvar-=tmp*(y[out]-ym);
}
if (fbw>0) {
xm=(fbo*xm-wt*xto)/fbw;
ym=(fbo*ym-wt*y[out])/fbw;
}
wt=w[in];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[in])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[in]-ym);
}
}
a=0.0;
if (var>vsmlsq) a=cvar/var;
smo[j]=a*(x[j]-xm)+ym;
if (iper>0) {
h=0.0;
if (fbw>0) h=1.0/fbw;
if (var>vsmlsq) h+=(x[j]-xm)*(x[j]-xm)/var;
acvr[j]=0.0;
a=1.0-w[j]*h;
if (a>0) acvr[j]=fabs(y[j]-smo[j])/a;
else if (j>0) acvr[j]=acvr[j-1];
}
}
//Nat: can rm -- to deal with equal time values
// for (j=0;j<n;j++) {
// sy=smo[j]*w[j];
// fbw=w[j];
// j0=j;
// while (j<n-1 && x[j+1]<=x[j]) {
// j+=1;
// sy+=w[j]*smo[j];
// fbw+=w[j];
// }
// if (j>j0) {
// a=0.0;
// if (fbw>0) a=sy/fbw;
// for (i=j0;i<=j;i++) smo[i]=a;
// }
// }
return 0;
}
//Copied/pasted comments from original fortran code by Friedman
// input:
// n : number of observations (x,y - pairs).
// x(n) : ordered abscissa values.
// y(n) : corresponding ordinate (response) values.
// w(n) : weight for each (x,y) observation.
// iper : periodic variable flag.
// iper=1 => x is ordered interval variable.
// iper=2 => x is a periodic variable with values
// in the range (0.0,1.0) and period 1.0.
// span : smoother span (fraction of observations in window).
// span=0.0 => automatic (variable) span selection.
// alpha : controls the high frequency (small span) penalty
// used with automatic span selection (bass tone control).
// (alpha.le.0.0 or alpha.gt.10.0 => no effect.)
// output:
// smo(n) : smoothed ordinate (response) values.
// scratch:
// sc(n,8) : internal working storage.
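// Layout of the 8 scratch columns of sc used below (each of length n, one block per frequency/thread):
//   columns 0,2,4 : smoothed fits at the tweeter/midrange/woofer spans
//   columns 1,3,5 : smoothed cross-validated residuals for those spans
//   column  6     : raw cv residuals, later the per-point selected span
//   column  7     : extra working storage for the residual smooths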
__global__ void supsmukernel(const int numThreads, const int n, const int iper, const DTYPE span, const DTYPE alpha, DTYPE * smo,
DTYPE * sc, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numThreads)
{
return;
}
const unsigned int dataOffset=tid*n;
const unsigned int scOffset=tid*n*8;
//global memory
 // pointers to the time, data and weight arrays at this frequency's offset
DTYPE * x=t1_sortby_argkeys+dataOffset;
DTYPE * y=data_sortby_argkeys+dataOffset;
DTYPE * w=weights_sortby_argkeys+dataOffset;
DTYPE * smo_thread=smo+dataOffset;
DTYPE * sc_thread=sc+scOffset;
 //Thread-local working variables (kept in registers, not shared memory)
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
// spans to be estimated: tweeter, midrange, and woofer
DTYPE spans[] = {0.05,0.2,0.5};
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo_thread[j] = a;
return;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
//Nat: rm-- should never be entered
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo_thread,sc_thread); // fixed span
return;
}
// if we made it here, the span will be estimated and variable
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc_thread+2*i*n,sc_thread+6*n);
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+(2*i+1)*n,sc_thread+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc_thread[j+(2*i+1)*n]<resmin) {
resmin=sc_thread[j+(2*i+1)*n];
sc_thread[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc_thread[j+5*n] && resmin>0) {
tmp = resmin/sc_thread[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc_thread[j+6*n]+=(spans[2]-sc_thread[j+6*n])*pow(tmp,10.0-alpha);
}
}
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+n,sc_thread+7*n);
for (j=0;j<n;j++) {
if (sc_thread[j+n]<=spans[0]) sc_thread[j+n]=spans[0];
if (sc_thread[j+n]>=spans[2]) sc_thread[j+n]=spans[2];
f=sc_thread[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j];
} else {
f/=spans[2]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j+4*n];
}
}
smoothkernel (n,x,sc_thread+3*n,w,spans[0],-jper,vsmlsq,smo_thread,sc_thread+7*n);
return;
}
//Copied/pasted comments from original fortran code by Friedman
// input:
// n : number of observations (x,y - pairs).
// x(n) : ordered abscissa values.
// y(n) : corresponding ordinate (response) values.
// w(n) : weight for each (x,y) observation.
// iper : periodic variable flag.
// iper=1 => x is ordered interval variable.
// iper=2 => x is a periodic variable with values
// in the range (0.0,1.0) and period 1.0.
// span : smoother span (fraction of observations in window).
// span=0.0 => automatic (variable) span selection.
// alpha : controls the high frequency (small span) penalty
// used with automatic span selection (bass tone control).
// (alpha.le.0.0 or alpha.gt.10.0 => no effect.)
// output:
// smo(n) : smoothed ordinate (response) values.
// scratch:
// sc(n,8) : internal working storage.
__global__ void supsmukernelOneThread (const int n, DTYPE * x, DTYPE * y, DTYPE * w, const int iper, const DTYPE span, const DTYPE alpha, DTYPE * smo, DTYPE * sc)
{
// sc is scratch space (8,n)
// output is smo: smoothed version of y
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
// spans to be estimated: tweeter, midrange, and woofer
DTYPE spans[] = {0.05,0.2,0.5};
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo[j] = a;
return;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
//Nat: can be removed
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo,sc); // fixed span
return;
}
// if we made it here, the span will be estimated and variable
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc+2*i*n,sc+6*n);
smoothkernel (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+(2*i+1)*n,sc+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc[j+(2*i+1)*n]<resmin) {
resmin=sc[j+(2*i+1)*n];
sc[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc[j+5*n] && resmin>0) {
tmp = resmin/sc[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc[j+6*n]+=(spans[2]-sc[j+6*n])*pow(tmp,10.0-alpha);
}
}
smoothkernel (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+n,sc+7*n);
for (j=0;j<n;j++) {
if (sc[j+n]<=spans[0]) sc[j+n]=spans[0];
if (sc[j+n]>=spans[2]) sc[j+n]=spans[2];
f=sc[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j];
} else {
f/=spans[2]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j+4*n];
}
}
smoothkernel (n,x,sc+3*n,w,spans[0],-jper,vsmlsq,smo,sc+7*n);
return;
}
//All SM synchronization must occur before and after the function
__forceinline__ __device__ void parReductionMaximumPowerinSM(DTYPE maxPowerForComputingPeriod[], unsigned int maxPowerIdxForComputingPeriod[])
{
int i = blockDim.x / 2;
while (i != 0) {
if(threadIdx.x < i && maxPowerForComputingPeriod[threadIdx.x + i] > maxPowerForComputingPeriod[threadIdx.x])
{
maxPowerForComputingPeriod[threadIdx.x] = maxPowerForComputingPeriod[threadIdx.x + i];
maxPowerIdxForComputingPeriod[threadIdx.x] = maxPowerIdxForComputingPeriod[threadIdx.x + i];
}
__syncthreads();
i/=2;
}
}
//one small block computes a single frequency
//DEPRECATED: This kernel design does not perform well (worse than the global memory kernel and the SM kernel one thread per freq)
/*
__global__ void supsmukernelSMOneFreqBlock(const int n, const int iper, const DTYPE span, const DTYPE alpha, DTYPE * smo,
DTYPE * sc, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int i,j,jper;
DTYPE sw,sy,a,resmin,tmp,f;
//passed in at runtime through kernel
//space for x, y, and w arrays in SM
extern __shared__ DTYPE xyw[];
__shared__ DTYPE * x;
__shared__ DTYPE * y;
__shared__ DTYPE * w;
__shared__ DTYPE * sc_thread;
__shared__ unsigned int dataOffset;
__shared__ DTYPE spans[3];
__shared__ DTYPE vsmlsq;
__shared__ DTYPE scale;
DTYPE * smo_thread;
if(threadIdx.x==0)
{
 //Compiling with -std=c++14
 //reports that this causes an invalid bitcast?
x=xyw;
y=xyw+(n);
w=xyw+(2*n);
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
dataOffset=blockIdx.x*n;
smo_thread=smo+dataOffset;
sc_thread=sc+blockIdx.x*n*8;
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
}
__syncthreads();
for (int i=0; i<n && ((i+threadIdx.x)<n); i+=blockDim.x)
{
int idx=i+threadIdx.x;
x[idx]=t1_sortby_argkeys[dataOffset+idx];
y[idx]=data_sortby_argkeys[dataOffset+idx];
w[idx]=weights_sortby_argkeys[dataOffset+idx];
}
__syncthreads();
//////////////////////////////////
//Below is the original supsmu code
//Use one thread in the block
if (threadIdx.x==0)
{
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo_thread[j] = a;
return;
}
//Nat: can be removed
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo_thread,sc_thread); // fixed span
return;
}
} //end if threadIdx.x==0
// if we made it here, the span will be estimated and variable
__syncthreads();
//original
//Use one thread in the block
if (threadIdx.x==0)
{
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc_thread+2*i*n,sc_thread+6*n);
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+(2*i+1)*n,sc_thread+7*n);
}
}
__syncthreads();
//original
// for (j=0;j<n;j++) {
// resmin=1.e20;
// for (i=0;i<3;i++) {
// if (sc_thread[j+(2*i+1)*n]<resmin) {
// resmin=sc_thread[j+(2*i+1)*n];
// sc_thread[j+6*n]=spans[i];
// }
// }
// if (alpha>0 && alpha<=10 && resmin<sc_thread[j+5*n] && resmin>0) {
// tmp = resmin/sc_thread[j+5*n];
// if (tmp<1.e-7) tmp=1.e-7;
// sc_thread[j+6*n]+=(spans[2]-sc_thread[j+6*n])*pow(tmp,10.0-alpha);
// }
// }
//parallelized with threads
for (j=0;j<n && ((j+threadIdx.x)<n);j+=blockDim.x) {
int idx=j+threadIdx.x;
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc_thread[idx+(2*i+1)*n]<resmin) {
resmin=sc_thread[idx+(2*i+1)*n];
sc_thread[idx+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc_thread[idx+5*n] && resmin>0) {
tmp = resmin/sc_thread[idx+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc_thread[idx+6*n]+=(spans[2]-sc_thread[idx+6*n])*pow(tmp,10.0-alpha);
}
}
__syncthreads();
//one thread
if (threadIdx.x==0)
{
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+n,sc_thread+7*n);
}
__syncthreads();
//original
// for (j=0;j<n;j++) {
// if (sc_thread[j+n]<=spans[0]) sc_thread[j+n]=spans[0];
// if (sc_thread[j+n]>=spans[2]) sc_thread[j+n]=spans[2];
// f=sc_thread[j+n]-spans[1];
// if (f<0) {
// f/=spans[0]-spans[1];
// sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j];
// } else {
// f/=spans[2]-spans[1];
// sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j+4*n];
// }
// }
//parallelized with threads
for (j=0;j<n && ((j+threadIdx.x)<n);j+=blockDim.x) {
// for (j=0;j<n;j++) {
int idx=j+threadIdx.x;
if (sc_thread[idx+n]<=spans[0]) sc_thread[idx+n]=spans[0];
if (sc_thread[idx+n]>=spans[2]) sc_thread[idx+n]=spans[2];
f=sc_thread[idx+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc_thread[idx+3*n]=(1.0-f)*sc_thread[idx+2*n]+f*sc_thread[idx];
} else {
f/=spans[2]-spans[1];
sc_thread[idx+3*n]=(1.0-f)*sc_thread[idx+2*n]+f*sc_thread[idx+4*n];
}
}
__syncthreads();
//only one thread computes this
if(threadIdx.x==0)
{
smoothkernel (n,x,sc_thread+3*n,w,spans[0],-jper,vsmlsq,smo_thread,sc_thread+7*n);
}
return;
}
*/
//kernel gets called after the main kernel
//some number of threads per frequency
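// Each frequency accumulates chi2 = sum(w*(y-smo)^2)/n over its n points, with
// numThreadsPerFreq threads per frequency combining partial sums through the
// shared-memory atomicAdd below. The reported periodogram power is
//   pgram = 0.5*n*(chi0-chi2)/chi0
// where chi0 is the reference chi-squared passed in by the caller (presumably
// that of the unsmoothed data).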
__global__ void computePgramReduction(const int batchwriteoffset, const int numThreadsPerFreq, const DTYPE chi0, const int n, const int numFreq, DTYPE * smo, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * pgram)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
extern __shared__ DTYPE globalSum[];
int freqNum=i/numThreadsPerFreq;
int threadInFreq=i%numThreadsPerFreq;
int freqInBlock=threadIdx.x/numThreadsPerFreq;
DTYPE localSum=0;
if(i<(numFreq*numThreadsPerFreq))
{
if (threadInFreq==0)
{
globalSum[freqInBlock]=0;
}
} //end if(i<(numFreq*numThreadsPerFreq))
__syncthreads();
if(i<(numFreq*numThreadsPerFreq))
{
int idxmin=(freqNum*n);
int idxmax=(idxmin+n);
for (int k=idxmin; k<idxmax; k+=numThreadsPerFreq){
// int idx=(freqNum*n)+(k+threadInFreq);
int idx=k+threadInFreq;
if (idx<idxmax)
{
localSum+=((data_sortby_argkeys[idx]-smo[idx])*(data_sortby_argkeys[idx]-smo[idx]))*weights_sortby_argkeys[idx];
}
}
}
if(i<(numFreq*numThreadsPerFreq))
{
atomicAdd(&globalSum[freqInBlock],localSum);
}
__syncthreads();
if (threadInFreq==0 && i<(numFreq*numThreadsPerFreq))
{
// chi2[batchwriteoffset+freqNum]=globalSum[freqInBlock]/(n*1.0);
// pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2[batchwriteoffset+freqNum])*n)/chi0;
DTYPE chi2=globalSum[freqInBlock]/(n*1.0);
pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2)*n)/chi0;
}
}
//kernel gets called after the main kernel
//some number of threads per frequency
//data, weights reordered for coalesced memory accesses
// x[j] -> x[freqNum+(numFreq*j)]
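// e.g. with numFreq=1000, point j=7 of frequency 3 is read from index
// 3 + 1000*7 = 7003, so consecutive threads (frequencies) touch consecutive addresses.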
__global__ void computePgramReductionCoalesced(const int batchwriteoffset, const int numThreadsPerFreq, const DTYPE chi0, const int n, const int numFreq, DTYPE * smo, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * pgram)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
extern __shared__ DTYPE globalSum[];
int freqNum=i/numThreadsPerFreq;
int threadInFreq=i%numThreadsPerFreq;
int freqInBlock=threadIdx.x/numThreadsPerFreq;
DTYPE localSum=0;
if(i<(numFreq*numThreadsPerFreq))
{
if (threadInFreq==0)
{
globalSum[freqInBlock]=0;
}
} //end if(i<(numFreq*numThreadsPerFreq))
__syncthreads();
if(i<(numFreq*numThreadsPerFreq))
{
int idxmin=(freqNum*n);
int idxmax=(idxmin+n);
for (int k=idxmin; k<idxmax; k+=numThreadsPerFreq){
int idx=k+threadInFreq;
      int idxCoalesced=freqNum+(numFreq*(idx-idxmin)); //idx-idxmin because it's one big long array, not an array with n elements
if (idx<idxmax)
{
localSum+=((data_sortby_argkeys[idxCoalesced]-smo[idx])*(data_sortby_argkeys[idxCoalesced]-smo[idx]))*weights_sortby_argkeys[idxCoalesced];
}
}
}
if(i<(numFreq*numThreadsPerFreq))
{
atomicAdd(&globalSum[freqInBlock],localSum);
}
__syncthreads();
if (threadInFreq==0 && i<(numFreq*numThreadsPerFreq))
{
// chi2[batchwriteoffset+freqNum]=globalSum[freqInBlock]/(n*1.0);
// pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2[batchwriteoffset+freqNum])*n)/chi0;
DTYPE chi2=globalSum[freqInBlock]/(n*1.0);
pgram[batchwriteoffset+freqNum]=(0.5*(chi0-chi2)*n)/chi0;
}
}
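// Phase-folds each time value at its trial period: one thread handles one
// (point, frequency) pair, computing p = 1/(minFreq + deltaf*(freqOffset+freqNum))
// and t1 = fmod(tt, p)/p, i.e. the phase in [0,1). freqOffset shifts into the
// frequency grid (used when the grid is processed in batches).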
__global__ void computePeriodModFOneThreadPerUpdate(const int n, const int numFreq, const DTYPE minFreq, const uint64_t freqOffset, const DTYPE deltaf, DTYPE * t1, DTYPE * tt)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
int freqNum=i/n;
DTYPE p=1.0/((minFreq)+(deltaf*(freqOffset+freqNum)));
t1[i]=fmod(tt[i%n],p)/p;
}
__global__ void initializeKeyArraysOneThreadPerUpdate(const int n, const int numFreq, int * argkeys, int * freqId)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
int freqNum=i/n;
//iota
argkeys[i]=i%n;
//same frequency id for the freqId array
freqId[i]=freqNum;
}
__global__ void mapUsingArgKeysOneThreadPerUpdate(const int n, const int numFreq, int * argkeys, DTYPE * data, DTYPE * weights, DTYPE * t1, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
//t1 has already been sorted. Only make a copy.
t1_sortby_argkeys[i]=t1[i];
//map between t1 argkeys and data and weights
data_sortby_argkeys[i]=data[argkeys[i]];
weights_sortby_argkeys[i]=weights[argkeys[i]];
}
//used for coalesced memory mapping
__global__ void mapUsingArgKeysOneThreadPerUpdateAndReorderCoalesced(const int n, const int numFreq, int * argkeys, DTYPE * data, DTYPE * weights, DTYPE * t1, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int i=threadIdx.x+ (blockIdx.x*blockDim.x);
if (i>=(n*numFreq))
{
return;
}
const int idxInFreq=(i%n);
const int freqNum=i/n;
const int idxWrite=(idxInFreq*numFreq)+freqNum;
//t1 has already been sorted. Only make a copy.
// t1_sortby_argkeys[idxWrite]=t1[argkeys[i]];
t1_sortby_argkeys[idxWrite]=t1[i];
data_sortby_argkeys[idxWrite]=data[argkeys[i]];
weights_sortby_argkeys[idxWrite]=weights[argkeys[i]];
}
//Uses SM but uses one thread per frequency
__global__ void supsmukernelSMOneThreadPerFreq(const int numFreq, const int n, const int iper, const DTYPE span, const DTYPE alpha,
DTYPE * smo, DTYPE * sc, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
unsigned int dataOffset=tid*n;
unsigned int scOffset=tid*n*8;
DTYPE * smo_thread=smo+dataOffset;
DTYPE * sc_thread=sc+scOffset;
//size passed in at runtime through kernel
extern __shared__ DTYPE xyw[];
__shared__ DTYPE spans[3];
DTYPE * x=xyw+(threadIdx.x*3*n);
DTYPE * y=xyw+(threadIdx.x*3*n)+(n);
DTYPE * w=xyw+(threadIdx.x*3*n)+(2*n);
//One thread copies the spans into SM
if(threadIdx.x==0)
{
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
}
__syncthreads();
if(tid<numFreq)
{
for (int i=0; i<n; i++)
{
x[i]=t1_sortby_argkeys[dataOffset+i];
y[i]=data_sortby_argkeys[dataOffset+i];
w[i]=weights_sortby_argkeys[dataOffset+i];
}
}
//////////////////////////////////
//Below is the original supsmu code
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
if(tid<numFreq)
{
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo_thread[j] = a;
return;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smoothkernel (n,x,y,w,span,jper,vsmlsq,smo_thread,sc_thread); // fixed span
return;
}
//Nat: if we made it here, the span will be estimated and variable
for (i=0;i<3;i++) {
smoothkernel (n,x,y,w,spans[i],jper,vsmlsq,sc_thread+2*i*n,sc_thread+6*n);
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+(2*i+1)*n,sc_thread+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc_thread[j+(2*i+1)*n]<resmin) {
resmin=sc_thread[j+(2*i+1)*n];
sc_thread[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc_thread[j+5*n] && resmin>0) {
tmp = resmin/sc_thread[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc_thread[j+6*n]+=(spans[2]-sc_thread[j+6*n])*pow(tmp,10.0-alpha);
}
}
smoothkernel (n,x,sc_thread+6*n,w,spans[1],-jper,vsmlsq,sc_thread+n,sc_thread+7*n);
for (j=0;j<n;j++) {
if (sc_thread[j+n]<=spans[0]) sc_thread[j+n]=spans[0];
if (sc_thread[j+n]>=spans[2]) sc_thread[j+n]=spans[2];
f=sc_thread[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j];
} else {
f/=spans[2]-spans[1];
sc_thread[j+3*n]=(1.0-f)*sc_thread[j+2*n]+f*sc_thread[j+4*n];
}
}
smoothkernel (n,x,sc_thread+3*n,w,spans[0],-jper,vsmlsq,smo_thread,sc_thread+7*n);
}//end the if(tid<numFreq) around everything
return;
}
//Smooth() function for single pass
//Nat's updated function
__device__ void smoothSinglePassCoalesced(const int n, const int freqNum, const int numFreq, int * ibw, DTYPE * x, DTYPE * y, DTYPE * w, const DTYPE vsmlsq, const int alpha, DTYPE * smo)
{
int i,j,in,out;
DTYPE wt,xto,xti,yto,yti,ibwb,a,f,chi2,chi2m,tmp,fbo,vary=0.0;
// DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
//Original: if arrays stored in registers
DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
const int offset=0;
// //end original if using arrays in registers
//If SINGLEPASSMODE==0 or SINGLEPASSMODE==2 (if not (SINGLEPASSMODE==1), then don't need to multiply by SMALLBLOCKSIZE)
//If using SM for these arrays
// __shared__ DTYPE fbw[SMALLBLOCKSIZE*3];
// __shared__ DTYPE xm[SMALLBLOCKSIZE*3];
// __shared__ DTYPE ym[SMALLBLOCKSIZE*3];
// __shared__ DTYPE smo0[SMALLBLOCKSIZE*3];
// __shared__ DTYPE var[SMALLBLOCKSIZE*3];
// __shared__ DTYPE cvar[SMALLBLOCKSIZE*3];
// const int offset=threadIdx.x*3;
// for (i=0; i<3; i++)
// {
// var[offset+i]=0;
// cvar[offset+i]=0;
// }
//data access transformation for coalesced memory accesses
// x[j] -> x[freqNum+(numFreq*j)]
for (i=0;i<3;i++) {
j=n-ibw[i]-1;
// xm[offset+i]=x[j]-1.0;
// ym[offset+i]=y[j];
// fbw[offset+i]=w[j];
xm[offset+i]=x[freqNum+(numFreq*j)]-1.0;
ym[offset+i]=y[freqNum+(numFreq*j)];
fbw[offset+i]=w[freqNum+(numFreq*j)];
for (j=n-ibw[i];j<n;j++) {
// xti=x[j]-1.0;
// yti=y[j];
// wt=w[j];
xti=x[freqNum+(numFreq*j)]-1.0;
yti=y[freqNum+(numFreq*j)];
wt=w[freqNum+(numFreq*j)];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
for (j=0;j<ibw[i];j++) {
// xti=x[j];
// yti=y[j];
// wt=w[j];
xti=x[freqNum+(numFreq*j)];
yti=y[freqNum+(numFreq*j)];
wt=w[freqNum+(numFreq*j)];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
}
for (j=0;j<n;j++) {
for (i=0;i<3;i++) {
out=j-ibw[i]-1;
in=j+ibw[i];
if (in>n-1) {
in-=n;
// xti=x[in]+1.0;
xti=x[freqNum+(numFreq*in)]+1.0;
// } else xti=x[in];
} else xti=x[freqNum+(numFreq*in)];
if (out<0) {
out+=n;
// xto=x[out]-1.0;
xto=x[freqNum+(numFreq*out)]-1.0;
// } else xto=x[out];
} else xto=x[freqNum+(numFreq*out)];
// yti=y[in];
// yto=y[out];
yti=y[freqNum+(numFreq*in)];
yto=y[freqNum+(numFreq*out)];
// wt=w[out];
wt=w[freqNum+(numFreq*out)];
fbo=fbw[offset+i];
fbw[offset+i]-=wt;
tmp=fbo*wt*(xto-xm[offset+i])/fbw[offset+i];
var[offset+i]-=tmp*(xto-xm[offset+i]);
cvar[offset+i]-=tmp*(yto-ym[offset+i]);
if (i==0) vary-=fbo*wt*(yto-ym[offset+0])*(yto-ym[offset+0])/fbw[offset+0];
xm[offset+i]=(fbo*xm[offset+i]-wt*xto)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]-wt*yto)/fbw[offset+i];
// wt=w[in];
wt=w[freqNum+(numFreq*in)];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
chi2m=1.e20; ibwb=ibw[2];
for (i=0;i<3;i++) {
a=0.0;
if (var[offset+i]>vsmlsq) a=cvar[offset+i]/var[offset+i];
// smo0[offset+i]=a*(x[j]-xm[offset+i])+ym[offset+i];
smo0[offset+i]=a*(x[freqNum+(numFreq*j)]-xm[offset+i])+ym[offset+i];
chi2 = vary-2*a*cvar[offset+0]+a*a*var[offset+0];
if (i>0) {
tmp = ym[offset+i]-ym[offset+0]-a*(xm[offset+i]-xm[offset+0]);
chi2 += tmp*tmp*fbw[offset+0];
}
tmp=1.0/fbw[offset+i];
// if (var[offset+i]>vsmlsq) tmp+=(x[j]-xm[offset+i])*(x[j]-xm[offset+i])/var[offset+i];
if (var[offset+i]>vsmlsq) tmp+=(x[freqNum+(numFreq*j)]-xm[offset+i])*(x[freqNum+(numFreq*j)]-xm[offset+i])/var[offset+i];
// tmp = 1.0 - w[j]*tmp;
tmp = 1.0 - w[freqNum+(numFreq*j)]*tmp;
chi2 = fabs(chi2)/(tmp*tmp);
if (chi2<chi2m) {
chi2m=chi2;
ibwb=(ibw[1]+ibw[i])/2.;
}
}
tmp = sqrt(chi2m/chi2);
if (tmp<1.e-7) tmp=1.e-7;
ibwb+=(ibw[2]-ibwb)*pow(tmp,10.-alpha);
f = ibwb-ibw[1];
if (f<0) {
f/=ibw[0]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+0];
} else {
f/=ibw[2]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+2];
}
}
}
//Smooth() function for single pass
//Nat's updated function
__device__ void smoothSinglePass(const int n, int * ibw, DTYPE * x, DTYPE * y, DTYPE * w, const DTYPE vsmlsq, const int alpha, DTYPE * smo)
{
int i,j,in,out;
DTYPE wt,xto,xti,yto,yti,ibwb,a,f,chi2,chi2m,tmp,fbo,vary=0.0;
// DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
//Original: if arrays stored in registers
DTYPE fbw[3],xm[3],ym[3],smo0[3],var[3]={0,0,0},cvar[3]={0,0,0};
const int offset=0;
//end original if using arrays in registers
//If SINGLEPASSMODE==2 (if not, then don't need to multiply by SMALLBLOCKSIZE)
//If using SM for these arrays
// __shared__ DTYPE fbw[SMALLBLOCKSIZE*3];
// __shared__ DTYPE xm[SMALLBLOCKSIZE*3];
// __shared__ DTYPE ym[SMALLBLOCKSIZE*3];
// __shared__ DTYPE smo0[SMALLBLOCKSIZE*3];
// __shared__ DTYPE var[SMALLBLOCKSIZE*3];
// __shared__ DTYPE cvar[SMALLBLOCKSIZE*3];
// const int offset=threadIdx.x*3;
// for (i=0; i<3; i++)
// {
// var[offset+i]=0;
// cvar[offset+i]=0;
// }
//end if using SM for these arrays
for (i=0;i<3;i++) {
j=n-ibw[i]-1;
xm[offset+i]=x[j]-1.0;
ym[offset+i]=y[j];
fbw[offset+i]=w[j];
for (j=n-ibw[i];j<n;j++) {
xti=x[j]-1.0;
yti=y[j];
wt=w[j];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
for (j=0;j<ibw[i];j++) {
xti=x[j];
yti=y[j];
wt=w[j];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
}
for (j=0;j<n;j++) {
for (i=0;i<3;i++) {
out=j-ibw[i]-1;
in=j+ibw[i];
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
yti=y[in];
yto=y[out];
wt=w[out];
fbo=fbw[offset+i];
fbw[offset+i]-=wt;
tmp=fbo*wt*(xto-xm[offset+i])/fbw[offset+i];
var[offset+i]-=tmp*(xto-xm[offset+i]);
cvar[offset+i]-=tmp*(yto-ym[offset+i]);
if (i==0) vary-=fbo*wt*(yto-ym[offset+0])*(yto-ym[offset+0])/fbw[offset+0];
xm[offset+i]=(fbo*xm[offset+i]-wt*xto)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]-wt*yto)/fbw[offset+i];
wt=w[in];
fbo=fbw[offset+i];
fbw[offset+i]+=wt;
xm[offset+i]=(fbo*xm[offset+i]+wt*xti)/fbw[offset+i];
ym[offset+i]=(fbo*ym[offset+i]+wt*yti)/fbw[offset+i];
tmp=fbw[offset+i]*wt*(xti-xm[offset+i])/fbo;
var[offset+i]+=tmp*(xti-xm[offset+i]);
cvar[offset+i]+=tmp*(yti-ym[offset+i]);
if (i==0) vary+=fbw[offset+0]*wt*(yti-ym[offset+0])*(yti-ym[offset+0])/fbo;
}
chi2m=1.e20; ibwb=ibw[2];
for (i=0;i<3;i++) {
a=0.0;
if (var[offset+i]>vsmlsq) a=cvar[offset+i]/var[offset+i];
smo0[offset+i]=a*(x[j]-xm[offset+i])+ym[offset+i];
chi2 = vary-2*a*cvar[offset+0]+a*a*var[offset+0];
if (i>0) {
tmp = ym[offset+i]-ym[offset+0]-a*(xm[offset+i]-xm[offset+0]);
chi2 += tmp*tmp*fbw[offset+0];
}
tmp=1.0/fbw[offset+i];
if (var[offset+i]>vsmlsq) tmp+=(x[j]-xm[offset+i])*(x[j]-xm[offset+i])/var[offset+i];
tmp = 1.0 - w[j]*tmp;
chi2 = fabs(chi2)/(tmp*tmp);
if (chi2<chi2m) {
chi2m=chi2;
ibwb=(ibw[1]+ibw[i])/2.;
}
}
tmp = sqrt(chi2m/chi2);
if (tmp<1.e-7) tmp=1.e-7;
ibwb+=(ibw[2]-ibwb)*pow(tmp,10.-alpha);
f = ibwb-ibw[1];
if (f<0) {
f/=ibw[0]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+0];
} else {
f/=ibw[2]-ibw[1];
smo[j]=(1.0-f)*smo0[offset+1]+f*smo0[offset+2];
}
}
}
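//Single-pass supersmoother with one block per frequency: the block stages this frequency's x/y/w values
//into shared memory, then thread 0 computes the bandwidths and runs smoothSinglePass.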
__global__ void supsmukernelSinglePassSMOneBlockPerFreq(const int n, const DTYPE inalpha, DTYPE * smo, DTYPE * t1_sortby_argkeys,
DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
const unsigned int dataOffset=blockIdx.x*n;
DTYPE * smo_thread=smo+dataOffset;
//Shared memory
//size passed in at runtime through kernel
extern __shared__ DTYPE xyw[];
DTYPE * x=xyw;
DTYPE * y=xyw+(n);
DTYPE * w=xyw+(2*n);
//SM
__shared__ int ibw[3];
__shared__ DTYPE spans[3];
__shared__ DTYPE scale;
__shared__ DTYPE vsmlsq;
__shared__ DTYPE alpha;
for (int i=0; i<n && ((i+threadIdx.x)<n); i++)
{
int idx=i+threadIdx.x;
x[idx]=t1_sortby_argkeys[dataOffset+idx];
y[idx]=data_sortby_argkeys[dataOffset+idx];
w[idx]=weights_sortby_argkeys[dataOffset+idx];
}
__syncthreads();
//one thread in the block computes everything
if (threadIdx.x==0)
{
int i=n/4-1;
int j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
for (int i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
smoothSinglePass(n, ibw, x, y, w, vsmlsq, alpha, smo_thread);
}
return;
}
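//Single-pass supersmoother with one thread per frequency: each thread copies its own x/y/w slice into a
//private region of shared memory (3*n values per thread) and keeps its ibw bandwidths in shared memory
//indexed by threadIdx.x.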
__global__ void supsmukernelSinglePassSMOneThreadPerFreq(const int numFreq, const int n, const DTYPE inalpha, DTYPE * smo,
DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numFreq)
{
return;
}
const unsigned int dataOffset=tid*n;
DTYPE * smo_thread=smo+dataOffset;
//Shared memory
//size passed in at runtime through kernel
extern __shared__ DTYPE xyw[];
DTYPE * x=xyw+(threadIdx.x*3*n);
DTYPE * y=xyw+(threadIdx.x*3*n)+(n);
DTYPE * w=xyw+(threadIdx.x*3*n)+(2*n);
for (int i=0; i<n; i++)
{
x[i]=t1_sortby_argkeys[dataOffset+i];
y[i]=data_sortby_argkeys[dataOffset+i];
w[i]=weights_sortby_argkeys[dataOffset+i];
}
//use SM
//each thread needs its own ibw
// __shared__ int ibwSM[3*SMALLBLOCKSIZE];
__shared__ int ibw[3*SMALLBLOCKSIZE];
//Constant for all frequencies
__shared__ DTYPE spans[3];
__shared__ DTYPE alpha;
int i=n/4-1;
int j=3*(i+1)-1;
DTYPE scale=x[j]-x[i];
DTYPE vsmlsq=1.e-6*scale*scale;
if (threadIdx.x==0)
{
//constant for all frequencies
alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
spans[0]=0.05;
spans[1]=0.2;
spans[2]=0.5;
}
__syncthreads();
//update pointer to my ibw
// int * ibw=&ibwSM[threadIdx.x*3];
// for (int i=0;i<3;i++) {
// ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
// if (ibw[i]<2) ibw[i]=2;
// }
for (int i=0;i<3;i++) {
ibw[threadIdx.x*3+i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[threadIdx.x*3+i]<2) ibw[threadIdx.x*3+i]=2;
}
smoothSinglePass(n, &ibw[threadIdx.x*3], x, y, w, vsmlsq, alpha, smo_thread);
return;
}
//global memory baseline for singlepass
__global__ void supsmukernelSinglePassGlobalMemory(const int numFreq, const int n,
const DTYPE inalpha, DTYPE * smo, DTYPE * tt, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numFreq)
{
return;
}
const unsigned int dataOffset=tid*n;
DTYPE * smo_thread=smo+dataOffset;
	// pointers to this frequency's time, data, and weight arrays in global memory
DTYPE * x=t1_sortby_argkeys+dataOffset;
DTYPE * y=data_sortby_argkeys+dataOffset;
DTYPE * w=weights_sortby_argkeys+dataOffset;
int ibw[3];
DTYPE spans[3] = {0.05,0.2,0.5};
int i=n/4-1;
int j=3*(i+1)-1;
DTYPE scale=x[j]-x[i];
DTYPE vsmlsq=1.e-6*scale*scale;
DTYPE alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
for (int i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
smoothSinglePass(n, ibw, x, y, w, vsmlsq, alpha, smo_thread);
return;
}
//global memory for singlepass -- coalesced memory accesses
__global__ void supsmukernelSinglePassGlobalMemoryCoalesced(const int numFreq, const int n,
const DTYPE inalpha, DTYPE * smo, DTYPE * tt, DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=numFreq)
{
return;
}
const unsigned int dataOffset=tid*n;
DTYPE * smo_thread=smo+dataOffset;
//original in registers
int ibw[3];
DTYPE spans[3] = {0.05,0.2,0.5};
//end original in registers
//shared memory
// __shared__ DTYPE spans[3];
// __shared__ int ibwSM[3*SMALLBLOCKSIZE];
// int * ibw=&ibwSM[threadIdx.x*3];
// if (threadIdx.x==0)
// {
// spans[0]=0.05;
// spans[1]=0.2;
// spans[2]=0.5;
// }
// __syncthreads();
//end shared memory
int i=n/4-1;
int j=3*(i+1)-1;
// DTYPE scale=x[j]-x[i];
DTYPE scale=t1_sortby_argkeys[tid+(numFreq*j)]-t1_sortby_argkeys[tid+(numFreq*i)];
DTYPE vsmlsq=1.e-6*scale*scale;
DTYPE alpha=inalpha;
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
for (int i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
smoothSinglePassCoalesced(n, tid, numFreq, ibw, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, vsmlsq, alpha, smo_thread);
return;
}
|
c82ca9bcc3ddbee4553453ee2ba8e38e045d62e6.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.hip"
int main(int argc, char**argv) {
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
float* A_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++) { A_h[i] = (rand()%100)/100.00; }
float* B_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++) { B_h[i] = (rand()%100)/100.00; }
float* C_h = (float*) malloc( sizeof(float)*n );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Vector size = %u\n", n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
float* A_d;
    cuda_ret = hipMalloc((void**)&A_d, sizeof(float)*n);
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
float* B_d;
    cuda_ret = hipMalloc((void**)&B_d, sizeof(float)*n);
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
float* C_d;
    cuda_ret = hipMalloc((void**)&C_d, sizeof(float)*n);
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
    cuda_ret = hipMemcpy(A_d, A_h, sizeof(float)*n, hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device");
    cuda_ret = hipMemcpy(B_d, B_h, sizeof(float)*n, hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
    const unsigned int THREADS_PER_BLOCK = 512;
    const unsigned int numBlocks = (n - 1)/THREADS_PER_BLOCK + 1;
    dim3 gridDim(numBlocks, 1, 1), blockDim(THREADS_PER_BLOCK, 1, 1);
    //Assumes the vector-add kernel defined in kernel.hip is named vecAddKernel(A, B, C, n)
    hipLaunchKernelGGL(vecAddKernel, gridDim, blockDim, 0, 0, A_d, B_d, C_d, n);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy device variables to host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
    //INSERT CODE HERE
    cuda_ret = hipMemcpy(C_h, C_d, sizeof(float)*n, hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory from device");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
| c82ca9bcc3ddbee4553453ee2ba8e38e045d62e6.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.cu"
int main(int argc, char**argv) {
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
float* A_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++) { A_h[i] = (rand()%100)/100.00; }
float* B_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++) { B_h[i] = (rand()%100)/100.00; }
float* C_h = (float*) malloc( sizeof(float)*n );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Vector size = %u\n", n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
float* A_d;
    cuda_ret = cudaMalloc((void**)&A_d, sizeof(float)*n);
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
float* B_d;
    cuda_ret = cudaMalloc((void**)&B_d, sizeof(float)*n);
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
float* C_d;
    cuda_ret = cudaMalloc((void**)&C_d, sizeof(float)*n);
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
    cuda_ret = cudaMemcpy(A_d, A_h, sizeof(float)*n, cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device");
    cuda_ret = cudaMemcpy(B_d, B_h, sizeof(float)*n, cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
    const unsigned int THREADS_PER_BLOCK = 512;
    const unsigned int numBlocks = (n - 1)/THREADS_PER_BLOCK + 1;
    dim3 gridDim(numBlocks, 1, 1), blockDim(THREADS_PER_BLOCK, 1, 1);
    //Assumes the vector-add kernel defined in kernel.cu is named vecAddKernel(A, B, C, n)
    vecAddKernel<<<gridDim, blockDim>>>(A_d, B_d, C_d, n);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy device variables to host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
    //INSERT CODE HERE
    cuda_ret = cudaMemcpy(C_h, C_d, sizeof(float)*n, cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory from device");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
359dacb9dee7a382b4e17ebba0d448cf42c28c77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary);
check_error(hipPeekAtLastError());
}
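// Forward pass of a convolutional layer on the GPU: optionally binarizes weights and/or inputs
// (binary/XNOR nets), runs the convolution through cuDNN or the im2col+GEMM fallback, then applies
// batch normalization or the bias, followed by the activation.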
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
} else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -(size/2.);
int h_offset = -(size/2.);
int out_index = j + w*(i + h*(k + c*b));
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i + l;
int cur_w = w_offset + j + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
}
}
}
extern "C" void smooth_layer(layer l, int size, float rate)
{
int h = l.out_h;
int w = l.out_w;
int c = l.out_c;
size_t n = h*w*c*l.batch;
hipLaunchKernelGGL(( smooth_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu);
check_error(hipPeekAtLastError());
}
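// Backward pass: propagates the activation gradient, accumulates bias/batch-norm gradients, then computes
// weight gradients and (when state.delta is set) input gradients via cuDNN or the explicit GEMM + col2im path.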
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
if(l.smooth){
smooth_layer(l, 5, l.smooth);
}
//constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
} else {
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
| 359dacb9dee7a382b4e17ebba0d448cf42c28c77.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary);
check_error(cudaPeekAtLastError());
}
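// Forward pass of a convolutional layer on the GPU: optionally binarizes weights and/or inputs
// (binary/XNOR nets), runs the convolution through cuDNN or the im2col+GEMM fallback, then applies
// batch normalization or the bias, followed by the activation.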
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
} else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -(size/2.);
int h_offset = -(size/2.);
int out_index = j + w*(i + h*(k + c*b));
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i + l;
int cur_w = w_offset + j + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
}
}
}
extern "C" void smooth_layer(layer l, int size, float rate)
{
int h = l.out_h;
int w = l.out_w;
int c = l.out_c;
size_t n = h*w*c*l.batch;
smooth_kernel<<<cuda_gridsize(n), BLOCK>>>(l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu);
check_error(cudaPeekAtLastError());
}
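// Backward pass: propagates the activation gradient, accumulates bias/batch-norm gradients, then computes
// weight gradients and (when state.delta is set) input gradients via cuDNN or the explicit GEMM + col2im path.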
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
if(l.smooth){
smooth_layer(l, 5, l.smooth);
}
//constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
} else {
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
|
fa5626c1b5a68a2685a7d4fdebb97e3bbe2635e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPUtils.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHGeneral.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.cuh>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being index.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
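// Sort-based implementation of index_put_: flattens the advanced indices into linear indices, sorts them so
// duplicate destinations become adjacent, and then scatters (or accumulates) the values with indexing_backward_kernel.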
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
linearIndex.divide_(sliceSize, "trunc");
// cub on CUDA <= 11.2 have a bug that for small sizes
// cub's sort can be much slower than thrust's merge sort
// this bug is fixed in CUDA 11.3
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11030
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
cuda::cub::sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
value_.scalar_type(), "indexing_backward", [&] {
hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
TORCH_CHECK(index.numel() == src.size(dim),
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
TORCH_CHECK(dstSliceSize == srcSliceSize,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
TORCH_WARN_ONCE(
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
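// Worked example for indexShouldBeMajor (shapes and strides are illustrative):
// a contiguous 2-D tensor of shape [4, 5] has strides {5, 1}.
// - sliceDim == 0: sliceStride = 5 and dimension 1 has stride 1 < 5, so the function
//   returns true and the index-major kernel variant is chosen.
// - sliceDim == 1: sliceStride = 1 and no other dimension has a smaller stride, so it
//   returns false and the elementInSlice-major variant is chosen.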
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) {
dim = maybe_wrap_dim(dim, self.dim());
TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
checkAllSameGPU(__func__, {self_arg, index_arg, source_arg});
TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index");
TORCH_CHECK(self.scalar_type() == source.scalar_type(),
"index_add_(): self and source must have the same scalar type");
TORCH_CHECK(dim == 0 || dim < source.dim(),
"index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
"index_add_(): Number of indices should be equal to self.size(dim)");
at::assert_no_internal_overlap(self);
at::assert_no_overlap(self, index);
at::assert_no_overlap(self, source);
// Scalars are treated as 1-d tensor
Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
at::assert_no_internal_overlap(self);
at::assert_no_partial_overlap(self, index);
at::assert_no_partial_overlap(self, source);
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
return self.index_put_(indices, source * alpha, true);
}
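// Note on the deterministic branch above (illustrative, not part of the original
// comments): each empty optional behaves like a full slice over its dimension, so for
// dim == 1 the indices list is {None, index} and the call acts roughly like
// self[:, index] += source * alpha in Python notation, routed through the
// deterministic index_put_ path instead of the atomicAdd kernels below.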
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of index we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return self;
}
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128));
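// Example launch configuration (numbers are assumptions for illustration): with
// sliceSize = 1000, sourceTotalSize = 16000 and mpc = 80,
// smallIndexBlock = min(1000, 128) = 128 threads,
// smallIndexGrid  = min(ceil(1000 / 128), 80 * 8) = min(8, 640) = 8 blocks,
// largeIndexBlock = min(16000, 128) = 128 threads,
// largeIndexGrid  = min(ceil(16000 / 128), 640) = min(125, 640) = 125 blocks.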
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
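// Example of the legacy-scalar adjustment above (illustrative): a 0-dim tensor yields
// TensorInfo{dims = 0}; tensorInfoLegacyIfScalar rewrites it to dims = 1, sizes[0] = 1,
// strides[0] = 1, so the select kernels can address it exactly like a one-element vector.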
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out;
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
out = at::empty_quantized({0}, self);
} else {
out = at::empty({0}, self.options());
}
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
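// Usage note (illustrative, not part of the original source): this overload only accepts
// a 0-dimensional value tensor and forwards value.item() to the Scalar overload above,
// so both spellings end up in the same masked_fill_kernel dispatch.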
} // native
} // at
| fa5626c1b5a68a2685a7d4fdebb97e3bbe2635e1.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCGeneral.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.cuh>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being index.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
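// Worked example for computeLinearIndex (shapes are illustrative assumptions):
// src of shape [2, 3, 4] has "as if contiguous" strides {12, 4, 1}. If only indices[1]
// is defined, linearIndex = wrapped_index * 4, emptyBefore = 1 with nElemBefore = 2,
// emptyAfter = 1 with nElemAfter = 4, and strideBefore = src.stride(0), the stride of
// the dimension immediately preceding the first indexed one.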
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
linearIndex.divide_(sliceSize, "trunc");
// cub on CUDA <= 11.2 have a bug that for small sizes
// cub's sort can be much slower than thrust's merge sort
// this bug is fixed in CUDA 11.3
#if defined(CUDA_VERSION) && CUDA_VERSION < 11030
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
cuda::cub::sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
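// Illustrative note (assumed numbers): because linearIndex only holds non-negative slice
// ids, the radix sort needs no more bits than the largest key. If get_num_bits returns
// the bit width of its argument (as the name suggests) and largestIndex(self) / sliceSize
// were 1000, nbits would be 10, so sort_pairs touches 10 of the 64 key bits.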
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
value_.scalar_type(), "indexing_backward", [&] {
indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
TORCH_CHECK(index.numel() == src.size(dim),
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
TORCH_CHECK(dstSliceSize == srcSliceSize,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
TORCH_WARN_ONCE(
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
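// Worked example for getSliceSize (shapes are illustrative): dst of shape [10, 20, 30]
// with dim == 1 gives dstSliceSize = 10 * 30 = 300; the checks above then require
// index.numel() == src.size(1) and the remaining src dimensions to multiply to the
// same 300, warning once if the per-slice shapes differ while the sizes still match.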
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) {
dim = maybe_wrap_dim(dim, self.dim());
TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
checkAllSameGPU(__func__, {self_arg, index_arg, source_arg});
TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index");
TORCH_CHECK(self.scalar_type() == source.scalar_type(),
"index_add_(): self and source must have the same scalar type");
TORCH_CHECK(dim == 0 || dim < source.dim(),
"index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
"index_add_(): Number of indices should be equal to self.size(dim)");
at::assert_no_internal_overlap(self);
at::assert_no_overlap(self, index);
at::assert_no_overlap(self, source);
// Scalars are treated as 1-d tensor
Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
at::assert_no_internal_overlap(self);
at::assert_no_partial_overlap(self, index);
at::assert_no_partial_overlap(self, source);
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
return self.index_put_(indices, source * alpha, true);
}
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of index we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return self;
}
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out;
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
out = at::empty_quantized({0}, self);
} else {
out = at::empty({0}, self.options());
}
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
|
771f54beb6ed9a2dc42ac997d02271efdeaef6b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// UW ID: cchang253, syeh6
// Name: Chun-Ming Chang, Shang-Yen Yeh
#include <iostream>
#include <string>
#include "clahe.cuh"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define BIN_SIZE 101
using namespace std;
int main(int argc, char *argv[])
{
/*
the way to run this program should be ./program_name [input_img_name] [output_img_name] [grid_size] [threshold]
*/
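// Example invocation (program name, file names and values are placeholders):
// ./clahe input.png output.png 8 40
// where 8 is the per-tile grid size and 40 is the clip threshold passed to the clahe kernel.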
if (argc < 5) // program name plus the four arguments described in the usage comment above
{
cout << "Didn't give enough arguments while calling CLAHE editor!\n";
}
else
{
try
{
string inputImg = argv[1];
string outputImg = argv[2];
int grid_size = stol(argv[3]);
int threshold = stol(argv[4]);
int width, height, channel;
unsigned char* rgb_image = stbi_load(inputImg.c_str(), &width, &height, &channel, 3); // 3 means RGB
int N = width * height;
int threads_per_block = grid_size * grid_size;
int num_block = ((width + grid_size - 1) / grid_size) * ((height + grid_size - 1) / grid_size);
float* dL;
float* dA;
float* dB;
unsigned char* dImg;
// Time only the computation, excluding image read and write.
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMallocManaged((void **) &dImg, N*channel*sizeof(unsigned char));
hipMallocManaged((void **) &dL, N * sizeof(float));
hipMallocManaged((void **) &dA, N * sizeof(float));
hipMallocManaged((void **) &dB, N * sizeof(float));
hipMemcpy(dImg, rgb_image, N*channel*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemset(dL, 0.0, N * sizeof(float));
hipMemset(dA, 0.0, N * sizeof(float));
hipMemset(dB, 0.0, N * sizeof(float));
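            // Pipeline (kernel names from clahe.cuh; exact behavior assumed from their names):
            //   1. transformRgbToLab: split the RGB image into separate L/A/B planes.
            //   2. clahe: per-tile clipped histogram + CDF (threshold limits contrast amplification).
            //   3. pixelInterpolate: remap each pixel's L value using the neighboring tiles' CDFs.
            //   4. transformLabToRgb: recombine the planes and convert back to RGB in place.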
hipLaunchKernelGGL(( transformRgbToLab), dim3(num_block), dim3(threads_per_block), 0, 0, dImg, width, height, dL, dA, dB);
hipDeviceSynchronize();
int block_dim = grid_size;
dim3 dimBlock(block_dim, block_dim);
dim3 dimGrid((width + dimBlock.x - 1)/dimBlock.x, (height+dimBlock.y -1)/dimBlock.y );
float* dCdf;
hipMallocManaged((void **) &dCdf, num_block * BIN_SIZE * sizeof(float));
hipMemset(dCdf, 0.0, num_block * BIN_SIZE * sizeof(float));
hipLaunchKernelGGL(( clahe), dim3(dimGrid), dim3(dimBlock), 0, 0, dL, width, height, threshold, dCdf);
hipDeviceSynchronize();
hipLaunchKernelGGL(( pixelInterpolate), dim3(dimGrid), dim3(dimBlock), 0, 0, dL, width, height, dCdf);
hipDeviceSynchronize();
hipLaunchKernelGGL(( transformLabToRgb), dim3(num_block), dim3(threads_per_block), 0, 0, dImg, width, height, dL, dA, dB);
hipDeviceSynchronize();
// hipMemcpy(rgb_image, dImg, N*channel*sizeof(unsigned char), hipMemcpyDeviceToHost);
// hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
hipEventElapsedTime(&ms, start, stop);
cout << ms << endl;
stbi_write_png(outputImg.c_str(), width, height, channel, dImg, width*3);
stbi_image_free(rgb_image);
hipFree(dL); hipFree(dA); hipFree(dB); hipFree(dImg);
}
catch (...)
{
cout << "Invalid argument!\n";
}
}
cout << "\n";
return 0;
} | 771f54beb6ed9a2dc42ac997d02271efdeaef6b6.cu | // UW ID: cchang253, syeh6
// Name: Chun-Ming Chang, Shang-Yen Yeh
#include <iostream>
#include <string>
#include "clahe.cuh"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define BIN_SIZE 101
using namespace std;
int main(int argc, char *argv[])
{
/*
    Usage: ./program_name [input_img_name] [output_img_name] [grid_size] [threshold]
*/
    if (argc < 5)
{
cout << "Didn't give enough arguments while calling CLAHE editor!\n";
}
else
{
try
{
string inputImg = argv[1];
string outputImg = argv[2];
int grid_size = stol(argv[3]);
int threshold = stol(argv[4]);
int width, height, channel;
unsigned char* rgb_image = stbi_load(inputImg.c_str(), &width, &height, &channel, 3); // 3 means RGB
int N = width * height;
int threads_per_block = grid_size * grid_size;
int num_block = ((width + grid_size - 1) / grid_size) * ((height + grid_size - 1) / grid_size);
float* dL;
float* dA;
float* dB;
unsigned char* dImg;
            // Time the GPU computation only; image read and write are excluded from the timing.
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMallocManaged((void **) &dImg, N*channel*sizeof(unsigned char));
cudaMallocManaged((void **) &dL, N * sizeof(float));
cudaMallocManaged((void **) &dA, N * sizeof(float));
cudaMallocManaged((void **) &dB, N * sizeof(float));
cudaMemcpy(dImg, rgb_image, N*channel*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemset(dL, 0.0, N * sizeof(float));
cudaMemset(dA, 0.0, N * sizeof(float));
cudaMemset(dB, 0.0, N * sizeof(float));
transformRgbToLab<<<num_block, threads_per_block>>>(dImg, width, height, dL, dA, dB);
cudaDeviceSynchronize();
int block_dim = grid_size;
dim3 dimBlock(block_dim, block_dim);
dim3 dimGrid((width + dimBlock.x - 1)/dimBlock.x, (height+dimBlock.y -1)/dimBlock.y );
float* dCdf;
cudaMallocManaged((void **) &dCdf, num_block * BIN_SIZE * sizeof(float));
cudaMemset(dCdf, 0.0, num_block * BIN_SIZE * sizeof(float));
clahe<<<dimGrid, dimBlock>>>(dL, width, height, threshold, dCdf);
cudaDeviceSynchronize();
pixelInterpolate<<<dimGrid, dimBlock>>>(dL, width, height, dCdf);
cudaDeviceSynchronize();
transformLabToRgb<<<num_block, threads_per_block>>>(dImg, width, height, dL, dA, dB);
cudaDeviceSynchronize();
// cudaMemcpy(rgb_image, dImg, N*channel*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
cudaEventElapsedTime(&ms, start, stop);
cout << ms << endl;
stbi_write_png(outputImg.c_str(), width, height, channel, dImg, width*3);
stbi_image_free(rgb_image);
cudaFree(dL); cudaFree(dA); cudaFree(dB); cudaFree(dImg);
}
catch (...)
{
cout << "Invalid argument!\n";
}
}
cout << "\n";
return 0;
} |
b270b20aab372a9c47f68e761c82166c0ac1acaa.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <ostream>
#include <iostream>
#include <iomanip>
#include <chrono>
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include <hip/hip_runtime_api.h>
#include <hip/device_functions.h>
#include "device_launch_parameters.h"
using namespace std;
double transform_matrix_cpu(
short* first_matrix,
const int first_matrix_height,
const int first_matrix_width,
short* second_matrix,
const int second_matrix_height,
const int second_matrix_width) {
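	// Reference CPU implementation: each run of 4 consecutive elements in a source row
	// is written out as a column of 4 elements (in reverse order), so the output matrix
	// has 4x the rows and 1/4 the columns of the input.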
auto start_cpu = chrono::steady_clock::now();
for (auto i = 0; i < first_matrix_height; i++)
for (auto j = 0; j < first_matrix_width; j+=4) {
second_matrix[(i * 4) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j + 3];
second_matrix[(i * 4 + 1) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j + 2];
second_matrix[(i * 4 + 2) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j + 1];
second_matrix[(i * 4 + 3) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j];
}
auto end_cpu = chrono::steady_clock::now();
auto cpu_time = end_cpu - start_cpu;
return chrono::duration <double, milli>(cpu_time).count();
}
__global__ void kernelSimpleGpu(
short* first_matrix,
const int first_matrix_height,
const int first_matrix_width,
short* second_matrix,
const int second_matrix_height,
const int second_matrix_width) {
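	// One thread per input element: mirror the CPU mapping, using the element's
	// position within its group of 4 (width % 4) to pick the destination row.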
int width = blockIdx.x * blockDim.x + threadIdx.x;
int height = blockIdx.y * blockDim.y + threadIdx.y;
	if (width >= first_matrix_width || height >= first_matrix_height)
return;
int offset = width % 4;
int out_height = height * 4 + 3 - offset;
int out_width = width / 4;
second_matrix[out_height * second_matrix_width + out_width] = first_matrix[height * first_matrix_width + width];
}
float transform_matrix_gpu_simple(short* first_matrix, const int first_matrix_height, const int first_matrix_width,
short* second_matrix, const int second_matrix_height, const int second_matrix_width) {
hipEvent_t startTime;
hipEvent_t stopTime;
short* gpu_first_matrix;
short* gpu_second_matrix;
hipMalloc((void**)&gpu_first_matrix, first_matrix_height * first_matrix_width * sizeof(short));
hipMemcpy(gpu_first_matrix, first_matrix, first_matrix_height * first_matrix_width * sizeof(short), hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_second_matrix, second_matrix_height * second_matrix_width * sizeof(short));
dim3 grid;
dim3 block(32, 32);
	// threadIdx.x indexes columns (width) and threadIdx.y indexes rows (height),
	// so the grid must cover the width in x and the height in y.
	grid.x = first_matrix_width / block.x;
	if (first_matrix_width % block.x != 0)
		grid.x += 1;
	grid.y = first_matrix_height / block.y;
	if (first_matrix_height % block.y != 0)
		grid.y += 1;
hipEventCreate(&startTime);
hipEventCreate(&stopTime);
hipEventRecord(startTime);
kernelSimpleGpu << <grid, block >> > (
gpu_first_matrix,
first_matrix_height,
first_matrix_width,
gpu_second_matrix,
second_matrix_height,
second_matrix_width);
hipEventRecord(stopTime);
hipEventSynchronize(stopTime);
float result_time;
hipEventElapsedTime(&result_time, startTime, stopTime);
hipMemcpy(second_matrix, gpu_second_matrix,
second_matrix_height * second_matrix_width * sizeof(short),
hipMemcpyDeviceToHost);
return result_time;
}
__global__ void kernelSharedGpu(
short* first_matrix,
const int first_matrix_height,
const int first_matrix_width,
short* second_matrix,
const int second_matrix_height,
const int second_matrix_width) {
int width = blockIdx.x * blockDim.x + threadIdx.x;
int height = blockIdx.y * blockDim.y + threadIdx.y;
	if (width >= first_matrix_width || height >= first_matrix_height)
return;
__shared__ unsigned short block[32][32];
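	// Each value is staged through a 32x32 shared-memory tile before the global write.
	// There is no inter-thread reuse of the tile here, so this mainly serves as a
	// comparison against the "simple" kernel rather than reducing global-memory traffic.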
int offset = width % 4;
int out_height = height * 4 + 3 - offset;
int out_width = width / 4;
block[threadIdx.y][threadIdx.x] = first_matrix[height * first_matrix_width + width];
second_matrix[out_height * second_matrix_width + out_width] = block[threadIdx.y][threadIdx.x];
block[threadIdx.y][threadIdx.x] = 0;
}
float transform_matrix_gpu_shared(short* first_matrix, const int first_matrix_height, const int first_matrix_width,
short* second_matrix, const int second_matrix_height, const int second_matrix_width) {
hipEvent_t startTime;
hipEvent_t stopTime;
short* gpu_first_matrix;
short* gpu_second_matrix;
hipMalloc((void**)&gpu_first_matrix, first_matrix_height * first_matrix_width * sizeof(short));
hipMemcpy(gpu_first_matrix, first_matrix, first_matrix_height * first_matrix_width * sizeof(short), hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_second_matrix, second_matrix_height * second_matrix_width * sizeof(short));
dim3 grid;
dim3 block(32, 32);
	grid.x = first_matrix_width / block.x;
	if (first_matrix_width % block.x != 0)
		grid.x += 1;
	grid.y = first_matrix_height / block.y;
	if (first_matrix_height % block.y != 0)
		grid.y += 1;
hipEventCreate(&startTime);
hipEventCreate(&stopTime);
hipEventRecord(startTime);
kernelSharedGpu << <grid, block >> > (
gpu_first_matrix,
first_matrix_height,
first_matrix_width,
gpu_second_matrix,
second_matrix_height,
second_matrix_width);
hipEventRecord(stopTime);
hipEventSynchronize(stopTime);
float result_time;
hipEventElapsedTime(&result_time, startTime, stopTime);
hipMemcpy(second_matrix, gpu_second_matrix,
second_matrix_height * second_matrix_width * sizeof(short),
hipMemcpyDeviceToHost);
return result_time;
}
bool compare_matrix(short* first, short* second, int height, int width) {
for (auto i = 0; i < height; i++)
for (auto j = 0; j < width; j++)
if (first[i * width + j] != second[i * width + j])
return false;
return true;
}
short* initialize_matrix(const int height, const int width) {
const auto matrix = static_cast<short *>(calloc(height * width, sizeof(short)));
return matrix;
}
void fill_random_matrix(short* matrix, int height, int width) {
short initializer = 0;
for (auto i = 0; i < height; i++)
for (auto j = 0; j < width; j++)
matrix[i * width + j] = rand() % 8 + 1;
}
void show_matrix(short* matrix, const int height, const int width) {
for (auto i = 0; i < height; i++) {
for (auto j = 0; j < width; j++)
cout << setw(2) << matrix[i * width + j];
cout << endl;
}
}
int hard_compare_matrix(short* first, short* second, int height, int width) {
int count_miss = 0;
for (auto i = 0; i < height; i++)
for (auto j = 0; j < width; j++)
if (first[i * width + j] != second[i * width + j]) {
count_miss++;
}
return count_miss;
}
int main() {
int first_matrix_height;
int first_matrix_width;
cout << "Matrix height: ";
cin >> first_matrix_height;
cout << "Matrix width: ";
cin >> first_matrix_width;
const int second_matrix_height = first_matrix_height * 4;
const int second_matrix_width = first_matrix_width / 4;
const auto first_matrix = initialize_matrix(first_matrix_height, first_matrix_width);
auto second_matrix = initialize_matrix(second_matrix_height, second_matrix_width);
auto third_matrix = initialize_matrix(second_matrix_height, second_matrix_width);
auto fouth_matrix = initialize_matrix(second_matrix_height, second_matrix_width);
fill_random_matrix(first_matrix, first_matrix_height, first_matrix_width);
auto cpu_time = transform_matrix_cpu(
first_matrix,
first_matrix_height,
first_matrix_width,
second_matrix,
second_matrix_height,
second_matrix_width);
auto gpu_simple_time = transform_matrix_gpu_simple(
first_matrix,
first_matrix_height,
first_matrix_width,
third_matrix,
second_matrix_height,
second_matrix_width);
auto gpu_shared_time = transform_matrix_gpu_shared(
first_matrix,
first_matrix_height,
first_matrix_width,
fouth_matrix,
second_matrix_height,
second_matrix_width);
/*show_matrix(first_matrix, first_matrix_height, first_matrix_width);
cout << endl;
show_matrix(second_matrix, second_matrix_height, second_matrix_width);
cout << endl;
show_matrix(third_matrix, second_matrix_height, second_matrix_width);
cout << endl;
show_matrix(fouth_matrix, second_matrix_height, second_matrix_width);*/
cout << "CPU Time: " << cpu_time << " ms." << endl;
cout << "GPU simple Time: " << gpu_simple_time << " ms." << endl;
cout << "GPU shared Time: " << gpu_shared_time << " ms." << endl;
cout << "Compare CPU and Simple - " << compare_matrix(second_matrix, third_matrix, second_matrix_height, second_matrix_width) << endl;
cout << "Compare CPU and Shared - " << compare_matrix(second_matrix, fouth_matrix, second_matrix_height, second_matrix_width) << endl;
cout << "Compare Simple and Shared - " << compare_matrix(third_matrix, fouth_matrix, second_matrix_height, second_matrix_width) << endl;
int first_hard_compare = hard_compare_matrix(second_matrix, third_matrix, second_matrix_height, second_matrix_width);
int second_hard_compare = hard_compare_matrix(second_matrix, fouth_matrix, second_matrix_height, second_matrix_width);
int third_hard_compare = hard_compare_matrix(third_matrix, fouth_matrix, second_matrix_height, second_matrix_width);
cout << "Result of miss count of first hard compare: " << first_hard_compare << endl;
cout << "Result of miss count of second hard compare: " << second_hard_compare << endl;
cout << "Result of miss count of third hard compare: " << third_hard_compare << endl;
system("pause");
} | b270b20aab372a9c47f68e761c82166c0ac1acaa.cu | #include <cstdlib>
#include <ostream>
#include <iostream>
#include <iomanip>
#include <chrono>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include <cuda_runtime_api.h>
#include <device_functions.h>
#include "device_launch_parameters.h"
using namespace std;
double transform_matrix_cpu(
short* first_matrix,
const int first_matrix_height,
const int first_matrix_width,
short* second_matrix,
const int second_matrix_height,
const int second_matrix_width) {
auto start_cpu = chrono::steady_clock::now();
for (auto i = 0; i < first_matrix_height; i++)
for (auto j = 0; j < first_matrix_width; j+=4) {
second_matrix[(i * 4) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j + 3];
second_matrix[(i * 4 + 1) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j + 2];
second_matrix[(i * 4 + 2) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j + 1];
second_matrix[(i * 4 + 3) * second_matrix_width + j / 4] = first_matrix[i * first_matrix_width + j];
}
auto end_cpu = chrono::steady_clock::now();
auto cpu_time = end_cpu - start_cpu;
return chrono::duration <double, milli>(cpu_time).count();
}
__global__ void kernelSimpleGpu(
short* first_matrix,
const int first_matrix_height,
const int first_matrix_width,
short* second_matrix,
const int second_matrix_height,
const int second_matrix_width) {
int width = blockIdx.x * blockDim.x + threadIdx.x;
int height = blockIdx.y * blockDim.y + threadIdx.y;
	if (width >= first_matrix_width || height >= first_matrix_height)
return;
int offset = width % 4;
int out_height = height * 4 + 3 - offset;
int out_width = width / 4;
second_matrix[out_height * second_matrix_width + out_width] = first_matrix[height * first_matrix_width + width];
}
float transform_matrix_gpu_simple(short* first_matrix, const int first_matrix_height, const int first_matrix_width,
short* second_matrix, const int second_matrix_height, const int second_matrix_width) {
cudaEvent_t startTime;
cudaEvent_t stopTime;
short* gpu_first_matrix;
short* gpu_second_matrix;
cudaMalloc((void**)&gpu_first_matrix, first_matrix_height * first_matrix_width * sizeof(short));
cudaMemcpy(gpu_first_matrix, first_matrix, first_matrix_height * first_matrix_width * sizeof(short), cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_second_matrix, second_matrix_height * second_matrix_width * sizeof(short));
dim3 grid;
dim3 block(32, 32);
	grid.x = first_matrix_width / block.x;
	if (first_matrix_width % block.x != 0)
		grid.x += 1;
	grid.y = first_matrix_height / block.y;
	if (first_matrix_height % block.y != 0)
		grid.y += 1;
cudaEventCreate(&startTime);
cudaEventCreate(&stopTime);
cudaEventRecord(startTime);
kernelSimpleGpu << <grid, block >> > (
gpu_first_matrix,
first_matrix_height,
first_matrix_width,
gpu_second_matrix,
second_matrix_height,
second_matrix_width);
cudaEventRecord(stopTime);
cudaEventSynchronize(stopTime);
float result_time;
cudaEventElapsedTime(&result_time, startTime, stopTime);
cudaMemcpy(second_matrix, gpu_second_matrix,
second_matrix_height * second_matrix_width * sizeof(short),
cudaMemcpyDeviceToHost);
return result_time;
}
__global__ void kernelSharedGpu(
short* first_matrix,
const int first_matrix_height,
const int first_matrix_width,
short* second_matrix,
const int second_matrix_height,
const int second_matrix_width) {
int width = blockIdx.x * blockDim.x + threadIdx.x;
int height = blockIdx.y * blockDim.y + threadIdx.y;
if (width > first_matrix_width || height > first_matrix_height)
return;
__shared__ unsigned short block[32][32];
int offset = width % 4;
int out_height = height * 4 + 3 - offset;
int out_width = width / 4;
block[threadIdx.y][threadIdx.x] = first_matrix[height * first_matrix_width + width];
second_matrix[out_height * second_matrix_width + out_width] = block[threadIdx.y][threadIdx.x];
block[threadIdx.y][threadIdx.x] = 0;
}
float transform_matrix_gpu_shared(short* first_matrix, const int first_matrix_height, const int first_matrix_width,
short* second_matrix, const int second_matrix_height, const int second_matrix_width) {
cudaEvent_t startTime;
cudaEvent_t stopTime;
short* gpu_first_matrix;
short* gpu_second_matrix;
cudaMalloc((void**)&gpu_first_matrix, first_matrix_height * first_matrix_width * sizeof(short));
cudaMemcpy(gpu_first_matrix, first_matrix, first_matrix_height * first_matrix_width * sizeof(short), cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_second_matrix, second_matrix_height * second_matrix_width * sizeof(short));
dim3 grid;
dim3 block(32, 32);
	grid.x = first_matrix_width / block.x;
	if (first_matrix_width % block.x != 0)
		grid.x += 1;
	grid.y = first_matrix_height / block.y;
	if (first_matrix_height % block.y != 0)
		grid.y += 1;
cudaEventCreate(&startTime);
cudaEventCreate(&stopTime);
cudaEventRecord(startTime);
kernelSharedGpu << <grid, block >> > (
gpu_first_matrix,
first_matrix_height,
first_matrix_width,
gpu_second_matrix,
second_matrix_height,
second_matrix_width);
cudaEventRecord(stopTime);
cudaEventSynchronize(stopTime);
float result_time;
cudaEventElapsedTime(&result_time, startTime, stopTime);
cudaMemcpy(second_matrix, gpu_second_matrix,
second_matrix_height * second_matrix_width * sizeof(short),
cudaMemcpyDeviceToHost);
return result_time;
}
bool compare_matrix(short* first, short* second, int height, int width) {
for (auto i = 0; i < height; i++)
for (auto j = 0; j < width; j++)
if (first[i * width + j] != second[i * width + j])
return false;
return true;
}
short* initialize_matrix(const int height, const int width) {
const auto matrix = static_cast<short *>(calloc(height * width, sizeof(short)));
return matrix;
}
void fill_random_matrix(short* matrix, int height, int width) {
short initializer = 0;
for (auto i = 0; i < height; i++)
for (auto j = 0; j < width; j++)
matrix[i * width + j] = rand() % 8 + 1;
}
void show_matrix(short* matrix, const int height, const int width) {
for (auto i = 0; i < height; i++) {
for (auto j = 0; j < width; j++)
cout << setw(2) << matrix[i * width + j];
cout << endl;
}
}
int hard_compare_matrix(short* first, short* second, int height, int width) {
int count_miss = 0;
for (auto i = 0; i < height; i++)
for (auto j = 0; j < width; j++)
if (first[i * width + j] != second[i * width + j]) {
count_miss++;
}
return count_miss;
}
int main() {
int first_matrix_height;
int first_matrix_width;
cout << "Matrix height: ";
cin >> first_matrix_height;
cout << "Matrix width: ";
cin >> first_matrix_width;
const int second_matrix_height = first_matrix_height * 4;
const int second_matrix_width = first_matrix_width / 4;
const auto first_matrix = initialize_matrix(first_matrix_height, first_matrix_width);
auto second_matrix = initialize_matrix(second_matrix_height, second_matrix_width);
auto third_matrix = initialize_matrix(second_matrix_height, second_matrix_width);
auto fouth_matrix = initialize_matrix(second_matrix_height, second_matrix_width);
fill_random_matrix(first_matrix, first_matrix_height, first_matrix_width);
auto cpu_time = transform_matrix_cpu(
first_matrix,
first_matrix_height,
first_matrix_width,
second_matrix,
second_matrix_height,
second_matrix_width);
auto gpu_simple_time = transform_matrix_gpu_simple(
first_matrix,
first_matrix_height,
first_matrix_width,
third_matrix,
second_matrix_height,
second_matrix_width);
auto gpu_shared_time = transform_matrix_gpu_shared(
first_matrix,
first_matrix_height,
first_matrix_width,
fouth_matrix,
second_matrix_height,
second_matrix_width);
/*show_matrix(first_matrix, first_matrix_height, first_matrix_width);
cout << endl;
show_matrix(second_matrix, second_matrix_height, second_matrix_width);
cout << endl;
show_matrix(third_matrix, second_matrix_height, second_matrix_width);
cout << endl;
show_matrix(fouth_matrix, second_matrix_height, second_matrix_width);*/
cout << "CPU Time: " << cpu_time << " ms." << endl;
cout << "GPU simple Time: " << gpu_simple_time << " ms." << endl;
cout << "GPU shared Time: " << gpu_shared_time << " ms." << endl;
cout << "Compare CPU and Simple - " << compare_matrix(second_matrix, third_matrix, second_matrix_height, second_matrix_width) << endl;
cout << "Compare CPU and Shared - " << compare_matrix(second_matrix, fouth_matrix, second_matrix_height, second_matrix_width) << endl;
cout << "Compare Simple and Shared - " << compare_matrix(third_matrix, fouth_matrix, second_matrix_height, second_matrix_width) << endl;
int first_hard_compare = hard_compare_matrix(second_matrix, third_matrix, second_matrix_height, second_matrix_width);
int second_hard_compare = hard_compare_matrix(second_matrix, fouth_matrix, second_matrix_height, second_matrix_width);
int third_hard_compare = hard_compare_matrix(third_matrix, fouth_matrix, second_matrix_height, second_matrix_width);
cout << "Result of miss count of first hard compare: " << first_hard_compare << endl;
cout << "Result of miss count of second hard compare: " << second_hard_compare << endl;
cout << "Result of miss count of third hard compare: " << third_hard_compare << endl;
system("pause");
} |
4e94e2ae8a67ff9cad6c13854d8f4af5bde9712a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <read_gauge.h>
#include <gauge_quda.h>
#include "gauge_force_quda.h"
#define MULT_SU3_NN_TEST(ma, mb) do{ \
float fa_re,fa_im, fb_re, fb_im, fc_re, fc_im; \
fa_re = \
ma##00_re * mb##00_re - ma##00_im * mb##00_im + \
ma##01_re * mb##10_re - ma##01_im * mb##10_im + \
ma##02_re * mb##20_re - ma##02_im * mb##20_im; \
fa_im = \
ma##00_re * mb##00_im + ma##00_im * mb##00_re + \
ma##01_re * mb##10_im + ma##01_im * mb##10_re + \
ma##02_re * mb##20_im + ma##02_im * mb##20_re; \
fb_re = \
ma##00_re * mb##01_re - ma##00_im * mb##01_im + \
ma##01_re * mb##11_re - ma##01_im * mb##11_im + \
ma##02_re * mb##21_re - ma##02_im * mb##21_im; \
fb_im = \
ma##00_re * mb##01_im + ma##00_im * mb##01_re + \
ma##01_re * mb##11_im + ma##01_im * mb##11_re + \
ma##02_re * mb##21_im + ma##02_im * mb##21_re; \
fc_re = \
ma##00_re * mb##02_re - ma##00_im * mb##02_im + \
ma##01_re * mb##12_re - ma##01_im * mb##12_im + \
ma##02_re * mb##22_re - ma##02_im * mb##22_im; \
fc_im = \
ma##00_re * mb##02_im + ma##00_im * mb##02_re + \
ma##01_re * mb##12_im + ma##01_im * mb##12_re + \
ma##02_re * mb##22_im + ma##02_im * mb##22_re; \
ma##00_re = fa_re; \
ma##00_im = fa_im; \
ma##01_re = fb_re; \
ma##01_im = fb_im; \
ma##02_re = fc_re; \
ma##02_im = fc_im; \
fa_re = \
ma##10_re * mb##00_re - ma##10_im * mb##00_im + \
ma##11_re * mb##10_re - ma##11_im * mb##10_im + \
ma##12_re * mb##20_re - ma##12_im * mb##20_im; \
fa_im = \
ma##10_re * mb##00_im + ma##10_im * mb##00_re + \
ma##11_re * mb##10_im + ma##11_im * mb##10_re + \
ma##12_re * mb##20_im + ma##12_im * mb##20_re; \
fb_re = \
ma##10_re * mb##01_re - ma##10_im * mb##01_im + \
ma##11_re * mb##11_re - ma##11_im * mb##11_im + \
ma##12_re * mb##21_re - ma##12_im * mb##21_im; \
fb_im = \
ma##10_re * mb##01_im + ma##10_im * mb##01_re + \
ma##11_re * mb##11_im + ma##11_im * mb##11_re + \
ma##12_re * mb##21_im + ma##12_im * mb##21_re; \
fc_re = \
ma##10_re * mb##02_re - ma##10_im * mb##02_im + \
ma##11_re * mb##12_re - ma##11_im * mb##12_im + \
ma##12_re * mb##22_re - ma##12_im * mb##22_im; \
fc_im = \
ma##10_re * mb##02_im + ma##10_im * mb##02_re + \
ma##11_re * mb##12_im + ma##11_im * mb##12_re + \
ma##12_re * mb##22_im + ma##12_im * mb##22_re; \
ma##10_re = fa_re; \
ma##10_im = fa_im; \
ma##11_re = fb_re; \
ma##11_im = fb_im; \
ma##12_re = fc_re; \
ma##12_im = fc_im; \
fa_re = \
ma##20_re * mb##00_re - ma##20_im * mb##00_im + \
ma##21_re * mb##10_re - ma##21_im * mb##10_im + \
ma##22_re * mb##20_re - ma##22_im * mb##20_im; \
fa_im = \
ma##20_re * mb##00_im + ma##20_im * mb##00_re + \
ma##21_re * mb##10_im + ma##21_im * mb##10_re + \
ma##22_re * mb##20_im + ma##22_im * mb##20_re; \
fb_re = \
ma##20_re * mb##01_re - ma##20_im * mb##01_im + \
ma##21_re * mb##11_re - ma##21_im * mb##11_im + \
ma##22_re * mb##21_re - ma##22_im * mb##21_im; \
fb_im = \
ma##20_re * mb##01_im + ma##20_im * mb##01_re + \
ma##21_re * mb##11_im + ma##21_im * mb##11_re + \
ma##22_re * mb##21_im + ma##22_im * mb##21_re; \
fc_re = \
ma##20_re * mb##02_re - ma##20_im * mb##02_im + \
ma##21_re * mb##12_re - ma##21_im * mb##12_im + \
ma##22_re * mb##22_re - ma##22_im * mb##22_im; \
fc_im = \
ma##20_re * mb##02_im + ma##20_im * mb##02_re + \
ma##21_re * mb##12_im + ma##21_im * mb##12_re + \
ma##22_re * mb##22_im + ma##22_im * mb##22_re; \
ma##20_re = fa_re; \
ma##20_im = fa_im; \
ma##21_re = fb_re; \
ma##21_im = fb_im; \
ma##22_re = fc_re; \
ma##22_im = fc_im; \
}while(0)
#define MULT_SU3_NA_TEST(ma, mb) do{ \
float fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \
fa_re = \
ma##00_re * mb##T00_re - ma##00_im * mb##T00_im + \
ma##01_re * mb##T10_re - ma##01_im * mb##T10_im + \
ma##02_re * mb##T20_re - ma##02_im * mb##T20_im; \
fa_im = \
ma##00_re * mb##T00_im + ma##00_im * mb##T00_re + \
ma##01_re * mb##T10_im + ma##01_im * mb##T10_re + \
ma##02_re * mb##T20_im + ma##02_im * mb##T20_re; \
fb_re = \
ma##00_re * mb##T01_re - ma##00_im * mb##T01_im + \
ma##01_re * mb##T11_re - ma##01_im * mb##T11_im + \
ma##02_re * mb##T21_re - ma##02_im * mb##T21_im; \
fb_im = \
ma##00_re * mb##T01_im + ma##00_im * mb##T01_re + \
ma##01_re * mb##T11_im + ma##01_im * mb##T11_re + \
ma##02_re * mb##T21_im + ma##02_im * mb##T21_re; \
fc_re = \
ma##00_re * mb##T02_re - ma##00_im * mb##T02_im + \
ma##01_re * mb##T12_re - ma##01_im * mb##T12_im + \
ma##02_re * mb##T22_re - ma##02_im * mb##T22_im; \
fc_im = \
ma##00_re * mb##T02_im + ma##00_im * mb##T02_re + \
ma##01_re * mb##T12_im + ma##01_im * mb##T12_re + \
ma##02_re * mb##T22_im + ma##02_im * mb##T22_re; \
ma##00_re = fa_re; \
ma##00_im = fa_im; \
ma##01_re = fb_re; \
ma##01_im = fb_im; \
ma##02_re = fc_re; \
ma##02_im = fc_im; \
fa_re = \
ma##10_re * mb##T00_re - ma##10_im * mb##T00_im + \
ma##11_re * mb##T10_re - ma##11_im * mb##T10_im + \
ma##12_re * mb##T20_re - ma##12_im * mb##T20_im; \
fa_im = \
ma##10_re * mb##T00_im + ma##10_im * mb##T00_re + \
ma##11_re * mb##T10_im + ma##11_im * mb##T10_re + \
ma##12_re * mb##T20_im + ma##12_im * mb##T20_re; \
fb_re = \
ma##10_re * mb##T01_re - ma##10_im * mb##T01_im + \
ma##11_re * mb##T11_re - ma##11_im * mb##T11_im + \
ma##12_re * mb##T21_re - ma##12_im * mb##T21_im; \
fb_im = \
ma##10_re * mb##T01_im + ma##10_im * mb##T01_re + \
ma##11_re * mb##T11_im + ma##11_im * mb##T11_re + \
ma##12_re * mb##T21_im + ma##12_im * mb##T21_re; \
fc_re = \
ma##10_re * mb##T02_re - ma##10_im * mb##T02_im + \
ma##11_re * mb##T12_re - ma##11_im * mb##T12_im + \
ma##12_re * mb##T22_re - ma##12_im * mb##T22_im; \
fc_im = \
ma##10_re * mb##T02_im + ma##10_im * mb##T02_re + \
ma##11_re * mb##T12_im + ma##11_im * mb##T12_re + \
ma##12_re * mb##T22_im + ma##12_im * mb##T22_re; \
ma##10_re = fa_re; \
ma##10_im = fa_im; \
ma##11_re = fb_re; \
ma##11_im = fb_im; \
ma##12_re = fc_re; \
ma##12_im = fc_im; \
fa_re = \
ma##20_re * mb##T00_re - ma##20_im * mb##T00_im + \
ma##21_re * mb##T10_re - ma##21_im * mb##T10_im + \
ma##22_re * mb##T20_re - ma##22_im * mb##T20_im; \
fa_im = \
ma##20_re * mb##T00_im + ma##20_im * mb##T00_re + \
ma##21_re * mb##T10_im + ma##21_im * mb##T10_re + \
ma##22_re * mb##T20_im + ma##22_im * mb##T20_re; \
fb_re = \
ma##20_re * mb##T01_re - ma##20_im * mb##T01_im + \
ma##21_re * mb##T11_re - ma##21_im * mb##T11_im + \
ma##22_re * mb##T21_re - ma##22_im * mb##T21_im; \
fb_im = \
ma##20_re * mb##T01_im + ma##20_im * mb##T01_re + \
ma##21_re * mb##T11_im + ma##21_im * mb##T11_re + \
ma##22_re * mb##T21_im + ma##22_im * mb##T21_re; \
fc_re = \
ma##20_re * mb##T02_re - ma##20_im * mb##T02_im + \
ma##21_re * mb##T12_re - ma##21_im * mb##T12_im + \
ma##22_re * mb##T22_re - ma##22_im * mb##T22_im; \
fc_im = \
ma##20_re * mb##T02_im + ma##20_im * mb##T02_re + \
ma##21_re * mb##T12_im + ma##21_im * mb##T12_re + \
ma##22_re * mb##T22_im + ma##22_im * mb##T22_re; \
ma##20_re = fa_re; \
ma##20_im = fa_im; \
ma##21_re = fb_re; \
ma##21_im = fb_im; \
ma##22_re = fc_re; \
ma##22_im = fc_im; \
}while(0)
#define MULT_SU3_AN_TEST(ma, mb) do{ \
float fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \
fa_re = \
ma##T00_re * mb##00_re - ma##T00_im * mb##00_im + \
ma##T01_re * mb##10_re - ma##T01_im * mb##10_im + \
ma##T02_re * mb##20_re - ma##T02_im * mb##20_im; \
fa_im = \
ma##T00_re * mb##00_im + ma##T00_im * mb##00_re + \
ma##T01_re * mb##10_im + ma##T01_im * mb##10_re + \
ma##T02_re * mb##20_im + ma##T02_im * mb##20_re; \
fb_re = \
ma##T10_re * mb##00_re - ma##T10_im * mb##00_im + \
ma##T11_re * mb##10_re - ma##T11_im * mb##10_im + \
ma##T12_re * mb##20_re - ma##T12_im * mb##20_im; \
fb_im = \
ma##T10_re * mb##00_im + ma##T10_im * mb##00_re + \
ma##T11_re * mb##10_im + ma##T11_im * mb##10_re + \
ma##T12_re * mb##20_im + ma##T12_im * mb##20_re; \
fc_re = \
ma##T20_re * mb##00_re - ma##T20_im * mb##00_im + \
ma##T21_re * mb##10_re - ma##T21_im * mb##10_im + \
ma##T22_re * mb##20_re - ma##T22_im * mb##20_im; \
fc_im = \
ma##T20_re * mb##00_im + ma##T20_im * mb##00_re + \
ma##T21_re * mb##10_im + ma##T21_im * mb##10_re + \
ma##T22_re * mb##20_im + ma##T22_im * mb##20_re; \
mb##00_re = fa_re; \
mb##00_im = fa_im; \
mb##10_re = fb_re; \
mb##10_im = fb_im; \
mb##20_re = fc_re; \
mb##20_im = fc_im; \
fa_re = \
ma##T00_re * mb##01_re - ma##T00_im * mb##01_im + \
ma##T01_re * mb##11_re - ma##T01_im * mb##11_im + \
ma##T02_re * mb##21_re - ma##T02_im * mb##21_im; \
fa_im = \
ma##T00_re * mb##01_im + ma##T00_im * mb##01_re + \
ma##T01_re * mb##11_im + ma##T01_im * mb##11_re + \
ma##T02_re * mb##21_im + ma##T02_im * mb##21_re; \
fb_re = \
ma##T10_re * mb##01_re - ma##T10_im * mb##01_im + \
ma##T11_re * mb##11_re - ma##T11_im * mb##11_im + \
ma##T12_re * mb##21_re - ma##T12_im * mb##21_im; \
fb_im = \
ma##T10_re * mb##01_im + ma##T10_im * mb##01_re + \
ma##T11_re * mb##11_im + ma##T11_im * mb##11_re + \
ma##T12_re * mb##21_im + ma##T12_im * mb##21_re; \
fc_re = \
ma##T20_re * mb##01_re - ma##T20_im * mb##01_im + \
ma##T21_re * mb##11_re - ma##T21_im * mb##11_im + \
ma##T22_re * mb##21_re - ma##T22_im * mb##21_im; \
fc_im = \
ma##T20_re * mb##01_im + ma##T20_im * mb##01_re + \
ma##T21_re * mb##11_im + ma##T21_im * mb##11_re + \
ma##T22_re * mb##21_im + ma##T22_im * mb##21_re; \
mb##01_re = fa_re; \
mb##01_im = fa_im; \
mb##11_re = fb_re; \
mb##11_im = fb_im; \
mb##21_re = fc_re; \
mb##21_im = fc_im; \
fa_re = \
ma##T00_re * mb##02_re - ma##T00_im * mb##02_im + \
ma##T01_re * mb##12_re - ma##T01_im * mb##12_im + \
ma##T02_re * mb##22_re - ma##T02_im * mb##22_im; \
fa_im = \
ma##T00_re * mb##02_im + ma##T00_im * mb##02_re + \
ma##T01_re * mb##12_im + ma##T01_im * mb##12_re + \
ma##T02_re * mb##22_im + ma##T02_im * mb##22_re; \
fb_re = \
ma##T10_re * mb##02_re - ma##T10_im * mb##02_im + \
ma##T11_re * mb##12_re - ma##T11_im * mb##12_im + \
ma##T12_re * mb##22_re - ma##T12_im * mb##22_im; \
fb_im = \
ma##T10_re * mb##02_im + ma##T10_im * mb##02_re + \
ma##T11_re * mb##12_im + ma##T11_im * mb##12_re + \
ma##T12_re * mb##22_im + ma##T12_im * mb##22_re; \
fc_re = \
ma##T20_re * mb##02_re - ma##T20_im * mb##02_im + \
ma##T21_re * mb##12_re - ma##T21_im * mb##12_im + \
ma##T22_re * mb##22_re - ma##T22_im * mb##22_im; \
fc_im = \
ma##T20_re * mb##02_im + ma##T20_im * mb##02_re + \
ma##T21_re * mb##12_im + ma##T21_im * mb##12_re + \
ma##T22_re * mb##22_im + ma##T22_im * mb##22_re; \
mb##02_re = fa_re; \
mb##02_im = fa_im; \
mb##12_re = fb_re; \
mb##12_im = fb_im; \
mb##22_re = fc_re; \
mb##22_im = fc_im; \
}while(0)
#define GF_SITE_MATRIX_LOAD_TEX 1
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle, dir, idx, var)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle, dir, idx, var)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var)
#endif
#define LOAD_MATRIX LOAD_MATRIX_12_SINGLE
#define LOAD_ANTI_HERMITIAN LOAD_ANTI_HERMITIAN_SINGLE
#define WRITE_ANTI_HERMITIAN WRITE_ANTI_HERMITIAN_SINGLE
#define RECONSTRUCT_MATRIX RECONSTRUCT_LINK_12
__constant__ int path_max_length;
void
gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length)
{
static int gauge_force_init_cuda_flag = 0;
if (gauge_force_init_cuda_flag){
return;
}
gauge_force_init_cuda_flag=1;
init_kernel_cuda(param);
hipMemcpyToSymbol("path_max_length", &path_max_length, sizeof(int));
}
#define COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx) do { \
switch(mydir){ \
case 0: \
new_mem_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \
new_x1 = (new_x1==X1m1)?0:new_x1+1; \
break; \
case 1: \
new_mem_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \
new_x2 = (new_x2==X2m1)?0:new_x2+1; \
break; \
case 2: \
new_mem_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \
new_x3 = (new_x3==X3m1)?0:new_x3+1; \
break; \
case 3: \
new_mem_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \
new_x4 = (new_x4==X4m1)?0:new_x4+1; \
break; \
} \
}while(0)
#define COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx) do { \
switch(mydir){ \
case 0: \
new_mem_idx = ( (new_x1==0)?idx+X1m1:idx-1); \
new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \
break; \
case 1: \
new_mem_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \
new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \
break; \
case 2: \
new_mem_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \
new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \
break; \
case 3: \
new_mem_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \
new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \
break; \
} \
}while(0)
#define GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \
sign =1; \
switch(dir){ \
case XUP: \
if ( (i4 & 1) == 1){ \
sign = 1; \
} \
break; \
case YUP: \
if ( ((i4+i1) & 1) == 1){ \
sign = 1; \
} \
break; \
case ZUP: \
if ( ((i4+i1+i2) & 1) == 1){ \
sign = 1; \
} \
break; \
case TUP: \
if (i4 == X4m1 ){ \
sign = 1; \
} \
break; \
} \
}while (0)
//for now we only consider 12-reconstruct and single precision
template<int oddBit>
__global__ void
parity_compute_gauge_force_kernel(float2* momEven, float2* momOdd,
int dir, double eb3,
float4* linkEven, float4* linkOdd,
int* input_path,
int* length, float* path_coeff, int num_paths)
{
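  // One thread per lattice site of parity `oddBit`. For each of the `num_paths` input
  // paths, walk the links along the path starting from the neighbor in direction `dir`
  // (forward links are multiplied as-is, backward links as adjoints), accumulate the
  // path_coeff-weighted products into `staple`, and finally update the momentum with
  // -eb3 * (U_dir * staple) projected onto its anti-Hermitian part.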
int i,j=0;
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = FAST_INT_DIVIDE(sid, X1h);
int x1h = sid - z1*X1h;
int z2 = FAST_INT_DIVIDE(z1, X2);
int x2 = z1 - z2*X2;
int x4 = FAST_INT_DIVIDE(z2, X3);
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int sign = 1;
float2* mymom=momEven;
if (oddBit){
mymom = momOdd;
}
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
float2 STAPLE0, STAPLE1, STAPLE2, STAPLE3,STAPLE4, STAPLE5, STAPLE6, STAPLE7, STAPLE8;
float2 AH0, AH1, AH2, AH3, AH4;
int new_mem_idx;
SET_SU3_MATRIX(staple, 0);
for(i=0;i < num_paths; i++){
int nbr_oddbit = (oddBit^1 );
int new_x1 =x1;
int new_x2 =x2;
int new_x3 =x3;
int new_x4 =x4;
COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(dir, X);
//linka: current matrix
//linkb: the loaded matrix in this round
SET_UNIT_SU3_MATRIX(linka);
int* path = input_path + i*path_max_length;
int lnkdir;
int path0 = path[0];
if (GOES_FORWARDS(path0)){
lnkdir=path0;
}else{
lnkdir=OPP_DIR(path0);
COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(path0), new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}
int nbr_idx = new_mem_idx >>1;
if (nbr_oddbit){
LOAD_ODD_MATRIX( lnkdir, nbr_idx, LINKB);
}else{
LOAD_EVEN_MATRIX( lnkdir, nbr_idx, LINKB);
}
GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4);
RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb);
if (GOES_FORWARDS(path0)){
COPY_SU3_MATRIX(linkb, linka);
COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(path0, new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}else{
SU3_ADJOINT(linkb, linka);
}
for(j=1; j < length[i]; j++){
int lnkdir;
int pathj = path[j];
if (GOES_FORWARDS(pathj)){
lnkdir=pathj;
}else{
lnkdir=OPP_DIR(pathj);
COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(pathj), new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}
int nbr_idx = new_mem_idx >>1;
if (nbr_oddbit){
LOAD_ODD_MATRIX(lnkdir, nbr_idx, LINKB);
}else{
LOAD_EVEN_MATRIX(lnkdir, nbr_idx, LINKB);
}
GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4);
RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb);
if (GOES_FORWARDS(pathj)){
MULT_SU3_NN_TEST(linka, linkb);
COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(pathj, new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}else{
MULT_SU3_NA_TEST(linka, linkb);
}
}//j
SCALAR_MULT_ADD_SU3_MATRIX(staple, linka, path_coeff[i], staple);
}//i
//update mom
if (oddBit){
LOAD_ODD_MATRIX(dir, sid, LINKA);
}else{
LOAD_EVEN_MATRIX(dir, sid, LINKA);
}
GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, x1, x2, x3, x4);
RECONSTRUCT_MATRIX(dir, sid, sign, linka);
MULT_SU3_NN_TEST(linka, staple);
LOAD_ANTI_HERMITIAN(mymom, dir, sid, AH);
UNCOMPRESS_ANTI_HERMITIAN(ah, linkb);
SCALAR_MULT_SUB_SU3_MATRIX(linkb, linka, eb3, linka);
MAKE_ANTI_HERMITIAN(linka, ah);
WRITE_ANTI_HERMITIAN(mymom, dir, sid, AH);
return;
}
void
gauge_force_cuda(FullMom cudaMom, int dir, double eb3, FullGauge cudaSiteLink,
QudaGaugeParam* param, int** input_path,
int* length, void* path_coeff, int num_paths, int max_length)
{
int i, j;
//input_path
int bytes = num_paths*max_length* sizeof(int);
int* input_path_d;
hipMalloc((void**)&input_path_d, bytes); checkCudaError();
hipMemset(input_path_d, 0, bytes);checkCudaError();
int* input_path_h = (int*)malloc(bytes);
if (input_path_h == NULL){
printf("ERROR: malloc failed for input_path_h in function %s\n", __FUNCTION__);
exit(1);
}
memset(input_path_h, 0, bytes);
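  // Flatten the ragged host-side path list into a dense, zero-padded
  // num_paths x max_length buffer before copying it to the device.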
for(i=0;i < num_paths;i++){
for(j=0; j < length[i]; j++){
input_path_h[i*max_length + j] =input_path[i][j];
}
}
hipMemcpy(input_path_d, input_path_h, bytes, hipMemcpyHostToDevice); checkCudaError();
//length
int* length_d;
hipMalloc((void**)&length_d, num_paths*sizeof(int)); checkCudaError();
hipMemcpy(length_d, length, num_paths*sizeof(int), hipMemcpyHostToDevice); checkCudaError();
//path_coeff
int gsize;
if (param->cuda_prec == QUDA_DOUBLE_PRECISION){
gsize = sizeof(double);
}else{
gsize= sizeof(float);
}
void* path_coeff_d;
hipMalloc((void**)&path_coeff_d, num_paths*gsize); checkCudaError();
hipMemcpy(path_coeff_d, path_coeff, num_paths*gsize, hipMemcpyHostToDevice); checkCudaError();
//compute the gauge forces
int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3];
dim3 blockDim(BLOCK_DIM, 1,1);
dim3 gridDim(volume/blockDim.x, 1, 1);
dim3 halfGridDim(volume/(2*blockDim.x), 1, 1);
float2* momEven = (float2*)cudaMom.even;
float2* momOdd = (float2*)cudaMom.odd;
float4* linkEven = (float4*)cudaSiteLink.even;
float4* linkOdd = (float4*)cudaSiteLink.odd;
hipBindTexture(0, siteLink0TexSingle, cudaSiteLink.even, cudaSiteLink.bytes);
hipBindTexture(0, siteLink1TexSingle, cudaSiteLink.odd, cudaSiteLink.bytes);
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel<0>), dim3(halfGridDim), dim3(blockDim), 0, 0, momEven, momOdd,
dir, eb3,
linkEven, linkOdd,
input_path_d, length_d, (float*)path_coeff_d,
num_paths);
//odd
  /* The reason we do not switch the even/odd function input parameters and the texture binding
   * is that we use the oddbit to decide where to load inside the kernel function
*/
hipLaunchKernelGGL(( parity_compute_gauge_force_kernel<1>), dim3(halfGridDim), dim3(blockDim), 0, 0, momEven, momOdd,
dir, eb3,
linkEven, linkOdd,
input_path_d, length_d, (float*)path_coeff_d,
num_paths);
hipUnbindTexture(siteLink0TexSingle);
hipUnbindTexture(siteLink1TexSingle);
checkCudaError();
hipFree(input_path_d); checkCudaError();
free(input_path_h);
hipFree(length_d);
hipFree(path_coeff_d);
}
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef WRITE_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
| 4e94e2ae8a67ff9cad6c13854d8f4af5bde9712a.cu | #include <read_gauge.h>
#include <gauge_quda.h>
#include "gauge_force_quda.h"
#define MULT_SU3_NN_TEST(ma, mb) do{ \
float fa_re,fa_im, fb_re, fb_im, fc_re, fc_im; \
fa_re = \
ma##00_re * mb##00_re - ma##00_im * mb##00_im + \
ma##01_re * mb##10_re - ma##01_im * mb##10_im + \
ma##02_re * mb##20_re - ma##02_im * mb##20_im; \
fa_im = \
ma##00_re * mb##00_im + ma##00_im * mb##00_re + \
ma##01_re * mb##10_im + ma##01_im * mb##10_re + \
ma##02_re * mb##20_im + ma##02_im * mb##20_re; \
fb_re = \
ma##00_re * mb##01_re - ma##00_im * mb##01_im + \
ma##01_re * mb##11_re - ma##01_im * mb##11_im + \
ma##02_re * mb##21_re - ma##02_im * mb##21_im; \
fb_im = \
ma##00_re * mb##01_im + ma##00_im * mb##01_re + \
ma##01_re * mb##11_im + ma##01_im * mb##11_re + \
ma##02_re * mb##21_im + ma##02_im * mb##21_re; \
fc_re = \
ma##00_re * mb##02_re - ma##00_im * mb##02_im + \
ma##01_re * mb##12_re - ma##01_im * mb##12_im + \
ma##02_re * mb##22_re - ma##02_im * mb##22_im; \
fc_im = \
ma##00_re * mb##02_im + ma##00_im * mb##02_re + \
ma##01_re * mb##12_im + ma##01_im * mb##12_re + \
ma##02_re * mb##22_im + ma##02_im * mb##22_re; \
ma##00_re = fa_re; \
ma##00_im = fa_im; \
ma##01_re = fb_re; \
ma##01_im = fb_im; \
ma##02_re = fc_re; \
ma##02_im = fc_im; \
fa_re = \
ma##10_re * mb##00_re - ma##10_im * mb##00_im + \
ma##11_re * mb##10_re - ma##11_im * mb##10_im + \
ma##12_re * mb##20_re - ma##12_im * mb##20_im; \
fa_im = \
ma##10_re * mb##00_im + ma##10_im * mb##00_re + \
ma##11_re * mb##10_im + ma##11_im * mb##10_re + \
ma##12_re * mb##20_im + ma##12_im * mb##20_re; \
fb_re = \
ma##10_re * mb##01_re - ma##10_im * mb##01_im + \
ma##11_re * mb##11_re - ma##11_im * mb##11_im + \
ma##12_re * mb##21_re - ma##12_im * mb##21_im; \
fb_im = \
ma##10_re * mb##01_im + ma##10_im * mb##01_re + \
ma##11_re * mb##11_im + ma##11_im * mb##11_re + \
ma##12_re * mb##21_im + ma##12_im * mb##21_re; \
fc_re = \
ma##10_re * mb##02_re - ma##10_im * mb##02_im + \
ma##11_re * mb##12_re - ma##11_im * mb##12_im + \
ma##12_re * mb##22_re - ma##12_im * mb##22_im; \
fc_im = \
ma##10_re * mb##02_im + ma##10_im * mb##02_re + \
ma##11_re * mb##12_im + ma##11_im * mb##12_re + \
ma##12_re * mb##22_im + ma##12_im * mb##22_re; \
ma##10_re = fa_re; \
ma##10_im = fa_im; \
ma##11_re = fb_re; \
ma##11_im = fb_im; \
ma##12_re = fc_re; \
ma##12_im = fc_im; \
fa_re = \
ma##20_re * mb##00_re - ma##20_im * mb##00_im + \
ma##21_re * mb##10_re - ma##21_im * mb##10_im + \
ma##22_re * mb##20_re - ma##22_im * mb##20_im; \
fa_im = \
ma##20_re * mb##00_im + ma##20_im * mb##00_re + \
ma##21_re * mb##10_im + ma##21_im * mb##10_re + \
ma##22_re * mb##20_im + ma##22_im * mb##20_re; \
fb_re = \
ma##20_re * mb##01_re - ma##20_im * mb##01_im + \
ma##21_re * mb##11_re - ma##21_im * mb##11_im + \
ma##22_re * mb##21_re - ma##22_im * mb##21_im; \
fb_im = \
ma##20_re * mb##01_im + ma##20_im * mb##01_re + \
ma##21_re * mb##11_im + ma##21_im * mb##11_re + \
ma##22_re * mb##21_im + ma##22_im * mb##21_re; \
fc_re = \
ma##20_re * mb##02_re - ma##20_im * mb##02_im + \
ma##21_re * mb##12_re - ma##21_im * mb##12_im + \
ma##22_re * mb##22_re - ma##22_im * mb##22_im; \
fc_im = \
ma##20_re * mb##02_im + ma##20_im * mb##02_re + \
ma##21_re * mb##12_im + ma##21_im * mb##12_re + \
ma##22_re * mb##22_im + ma##22_im * mb##22_re; \
ma##20_re = fa_re; \
ma##20_im = fa_im; \
ma##21_re = fb_re; \
ma##21_im = fb_im; \
ma##22_re = fc_re; \
ma##22_im = fc_im; \
}while(0)
#define MULT_SU3_NA_TEST(ma, mb) do{ \
float fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \
fa_re = \
ma##00_re * mb##T00_re - ma##00_im * mb##T00_im + \
ma##01_re * mb##T10_re - ma##01_im * mb##T10_im + \
ma##02_re * mb##T20_re - ma##02_im * mb##T20_im; \
fa_im = \
ma##00_re * mb##T00_im + ma##00_im * mb##T00_re + \
ma##01_re * mb##T10_im + ma##01_im * mb##T10_re + \
ma##02_re * mb##T20_im + ma##02_im * mb##T20_re; \
fb_re = \
ma##00_re * mb##T01_re - ma##00_im * mb##T01_im + \
ma##01_re * mb##T11_re - ma##01_im * mb##T11_im + \
ma##02_re * mb##T21_re - ma##02_im * mb##T21_im; \
fb_im = \
ma##00_re * mb##T01_im + ma##00_im * mb##T01_re + \
ma##01_re * mb##T11_im + ma##01_im * mb##T11_re + \
ma##02_re * mb##T21_im + ma##02_im * mb##T21_re; \
fc_re = \
ma##00_re * mb##T02_re - ma##00_im * mb##T02_im + \
ma##01_re * mb##T12_re - ma##01_im * mb##T12_im + \
ma##02_re * mb##T22_re - ma##02_im * mb##T22_im; \
fc_im = \
ma##00_re * mb##T02_im + ma##00_im * mb##T02_re + \
ma##01_re * mb##T12_im + ma##01_im * mb##T12_re + \
ma##02_re * mb##T22_im + ma##02_im * mb##T22_re; \
ma##00_re = fa_re; \
ma##00_im = fa_im; \
ma##01_re = fb_re; \
ma##01_im = fb_im; \
ma##02_re = fc_re; \
ma##02_im = fc_im; \
fa_re = \
ma##10_re * mb##T00_re - ma##10_im * mb##T00_im + \
ma##11_re * mb##T10_re - ma##11_im * mb##T10_im + \
ma##12_re * mb##T20_re - ma##12_im * mb##T20_im; \
fa_im = \
ma##10_re * mb##T00_im + ma##10_im * mb##T00_re + \
ma##11_re * mb##T10_im + ma##11_im * mb##T10_re + \
ma##12_re * mb##T20_im + ma##12_im * mb##T20_re; \
fb_re = \
ma##10_re * mb##T01_re - ma##10_im * mb##T01_im + \
ma##11_re * mb##T11_re - ma##11_im * mb##T11_im + \
ma##12_re * mb##T21_re - ma##12_im * mb##T21_im; \
fb_im = \
ma##10_re * mb##T01_im + ma##10_im * mb##T01_re + \
ma##11_re * mb##T11_im + ma##11_im * mb##T11_re + \
ma##12_re * mb##T21_im + ma##12_im * mb##T21_re; \
fc_re = \
ma##10_re * mb##T02_re - ma##10_im * mb##T02_im + \
ma##11_re * mb##T12_re - ma##11_im * mb##T12_im + \
ma##12_re * mb##T22_re - ma##12_im * mb##T22_im; \
fc_im = \
ma##10_re * mb##T02_im + ma##10_im * mb##T02_re + \
ma##11_re * mb##T12_im + ma##11_im * mb##T12_re + \
ma##12_re * mb##T22_im + ma##12_im * mb##T22_re; \
ma##10_re = fa_re; \
ma##10_im = fa_im; \
ma##11_re = fb_re; \
ma##11_im = fb_im; \
ma##12_re = fc_re; \
ma##12_im = fc_im; \
fa_re = \
ma##20_re * mb##T00_re - ma##20_im * mb##T00_im + \
ma##21_re * mb##T10_re - ma##21_im * mb##T10_im + \
ma##22_re * mb##T20_re - ma##22_im * mb##T20_im; \
fa_im = \
ma##20_re * mb##T00_im + ma##20_im * mb##T00_re + \
ma##21_re * mb##T10_im + ma##21_im * mb##T10_re + \
ma##22_re * mb##T20_im + ma##22_im * mb##T20_re; \
fb_re = \
ma##20_re * mb##T01_re - ma##20_im * mb##T01_im + \
ma##21_re * mb##T11_re - ma##21_im * mb##T11_im + \
ma##22_re * mb##T21_re - ma##22_im * mb##T21_im; \
fb_im = \
ma##20_re * mb##T01_im + ma##20_im * mb##T01_re + \
ma##21_re * mb##T11_im + ma##21_im * mb##T11_re + \
ma##22_re * mb##T21_im + ma##22_im * mb##T21_re; \
fc_re = \
ma##20_re * mb##T02_re - ma##20_im * mb##T02_im + \
ma##21_re * mb##T12_re - ma##21_im * mb##T12_im + \
ma##22_re * mb##T22_re - ma##22_im * mb##T22_im; \
fc_im = \
ma##20_re * mb##T02_im + ma##20_im * mb##T02_re + \
ma##21_re * mb##T12_im + ma##21_im * mb##T12_re + \
ma##22_re * mb##T22_im + ma##22_im * mb##T22_re; \
ma##20_re = fa_re; \
ma##20_im = fa_im; \
ma##21_re = fb_re; \
ma##21_im = fb_im; \
ma##22_re = fc_re; \
ma##22_im = fc_im; \
}while(0)
#define MULT_SU3_AN_TEST(ma, mb) do{ \
float fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \
fa_re = \
ma##T00_re * mb##00_re - ma##T00_im * mb##00_im + \
ma##T01_re * mb##10_re - ma##T01_im * mb##10_im + \
ma##T02_re * mb##20_re - ma##T02_im * mb##20_im; \
fa_im = \
ma##T00_re * mb##00_im + ma##T00_im * mb##00_re + \
ma##T01_re * mb##10_im + ma##T01_im * mb##10_re + \
ma##T02_re * mb##20_im + ma##T02_im * mb##20_re; \
fb_re = \
ma##T10_re * mb##00_re - ma##T10_im * mb##00_im + \
ma##T11_re * mb##10_re - ma##T11_im * mb##10_im + \
ma##T12_re * mb##20_re - ma##T12_im * mb##20_im; \
fb_im = \
ma##T10_re * mb##00_im + ma##T10_im * mb##00_re + \
ma##T11_re * mb##10_im + ma##T11_im * mb##10_re + \
ma##T12_re * mb##20_im + ma##T12_im * mb##20_re; \
fc_re = \
ma##T20_re * mb##00_re - ma##T20_im * mb##00_im + \
ma##T21_re * mb##10_re - ma##T21_im * mb##10_im + \
ma##T22_re * mb##20_re - ma##T22_im * mb##20_im; \
fc_im = \
ma##T20_re * mb##00_im + ma##T20_im * mb##00_re + \
ma##T21_re * mb##10_im + ma##T21_im * mb##10_re + \
ma##T22_re * mb##20_im + ma##T22_im * mb##20_re; \
mb##00_re = fa_re; \
mb##00_im = fa_im; \
mb##10_re = fb_re; \
mb##10_im = fb_im; \
mb##20_re = fc_re; \
mb##20_im = fc_im; \
fa_re = \
ma##T00_re * mb##01_re - ma##T00_im * mb##01_im + \
ma##T01_re * mb##11_re - ma##T01_im * mb##11_im + \
ma##T02_re * mb##21_re - ma##T02_im * mb##21_im; \
fa_im = \
ma##T00_re * mb##01_im + ma##T00_im * mb##01_re + \
ma##T01_re * mb##11_im + ma##T01_im * mb##11_re + \
ma##T02_re * mb##21_im + ma##T02_im * mb##21_re; \
fb_re = \
ma##T10_re * mb##01_re - ma##T10_im * mb##01_im + \
ma##T11_re * mb##11_re - ma##T11_im * mb##11_im + \
ma##T12_re * mb##21_re - ma##T12_im * mb##21_im; \
fb_im = \
ma##T10_re * mb##01_im + ma##T10_im * mb##01_re + \
ma##T11_re * mb##11_im + ma##T11_im * mb##11_re + \
ma##T12_re * mb##21_im + ma##T12_im * mb##21_re; \
fc_re = \
ma##T20_re * mb##01_re - ma##T20_im * mb##01_im + \
ma##T21_re * mb##11_re - ma##T21_im * mb##11_im + \
ma##T22_re * mb##21_re - ma##T22_im * mb##21_im; \
fc_im = \
ma##T20_re * mb##01_im + ma##T20_im * mb##01_re + \
ma##T21_re * mb##11_im + ma##T21_im * mb##11_re + \
ma##T22_re * mb##21_im + ma##T22_im * mb##21_re; \
mb##01_re = fa_re; \
mb##01_im = fa_im; \
mb##11_re = fb_re; \
mb##11_im = fb_im; \
mb##21_re = fc_re; \
mb##21_im = fc_im; \
fa_re = \
ma##T00_re * mb##02_re - ma##T00_im * mb##02_im + \
ma##T01_re * mb##12_re - ma##T01_im * mb##12_im + \
ma##T02_re * mb##22_re - ma##T02_im * mb##22_im; \
fa_im = \
ma##T00_re * mb##02_im + ma##T00_im * mb##02_re + \
ma##T01_re * mb##12_im + ma##T01_im * mb##12_re + \
ma##T02_re * mb##22_im + ma##T02_im * mb##22_re; \
fb_re = \
ma##T10_re * mb##02_re - ma##T10_im * mb##02_im + \
ma##T11_re * mb##12_re - ma##T11_im * mb##12_im + \
ma##T12_re * mb##22_re - ma##T12_im * mb##22_im; \
fb_im = \
ma##T10_re * mb##02_im + ma##T10_im * mb##02_re + \
ma##T11_re * mb##12_im + ma##T11_im * mb##12_re + \
ma##T12_re * mb##22_im + ma##T12_im * mb##22_re; \
fc_re = \
ma##T20_re * mb##02_re - ma##T20_im * mb##02_im + \
ma##T21_re * mb##12_re - ma##T21_im * mb##12_im + \
ma##T22_re * mb##22_re - ma##T22_im * mb##22_im; \
fc_im = \
ma##T20_re * mb##02_im + ma##T20_im * mb##02_re + \
ma##T21_re * mb##12_im + ma##T21_im * mb##12_re + \
ma##T22_re * mb##22_im + ma##T22_im * mb##22_re; \
mb##02_re = fa_re; \
mb##02_im = fa_im; \
mb##12_re = fb_re; \
mb##12_im = fb_im; \
mb##22_re = fc_re; \
mb##22_im = fc_im; \
}while(0)
#define GF_SITE_MATRIX_LOAD_TEX 1
#if (GF_SITE_MATRIX_LOAD_TEX == 1)
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle, dir, idx, var)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle, dir, idx, var)
#else
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var)
#define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var)
#endif
#define LOAD_MATRIX LOAD_MATRIX_12_SINGLE
#define LOAD_ANTI_HERMITIAN LOAD_ANTI_HERMITIAN_SINGLE
#define WRITE_ANTI_HERMITIAN WRITE_ANTI_HERMITIAN_SINGLE
#define RECONSTRUCT_MATRIX RECONSTRUCT_LINK_12
__constant__ int path_max_length;
void
gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length)
{
static int gauge_force_init_cuda_flag = 0;
if (gauge_force_init_cuda_flag){
return;
}
gauge_force_init_cuda_flag=1;
init_kernel_cuda(param);
cudaMemcpyToSymbol("path_max_length", &path_max_length, sizeof(int));
}
#define COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx) do { \
switch(mydir){ \
case 0: \
new_mem_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \
new_x1 = (new_x1==X1m1)?0:new_x1+1; \
break; \
case 1: \
new_mem_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \
new_x2 = (new_x2==X2m1)?0:new_x2+1; \
break; \
case 2: \
new_mem_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \
new_x3 = (new_x3==X3m1)?0:new_x3+1; \
break; \
case 3: \
new_mem_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \
new_x4 = (new_x4==X4m1)?0:new_x4+1; \
break; \
} \
}while(0)
#define COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx) do { \
switch(mydir){ \
case 0: \
new_mem_idx = ( (new_x1==0)?idx+X1m1:idx-1); \
new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \
break; \
case 1: \
new_mem_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \
new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \
break; \
case 2: \
new_mem_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \
new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \
break; \
case 3: \
new_mem_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \
new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \
break; \
} \
}while(0)
#define GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \
sign =1; \
switch(dir){ \
case XUP: \
if ( (i4 & 1) == 1){ \
sign = 1; \
} \
break; \
case YUP: \
if ( ((i4+i1) & 1) == 1){ \
sign = 1; \
} \
break; \
case ZUP: \
if ( ((i4+i1+i2) & 1) == 1){ \
sign = 1; \
} \
break; \
case TUP: \
if (i4 == X4m1 ){ \
sign = 1; \
} \
break; \
} \
}while (0)
//for now we only consider 12-reconstruct and single precision
template<int oddBit>
__global__ void
parity_compute_gauge_force_kernel(float2* momEven, float2* momOdd,
int dir, double eb3,
float4* linkEven, float4* linkOdd,
int* input_path,
int* length, float* path_coeff, int num_paths)
{
int i,j=0;
int sid = blockIdx.x * blockDim.x + threadIdx.x;
int z1 = FAST_INT_DIVIDE(sid, X1h);
int x1h = sid - z1*X1h;
int z2 = FAST_INT_DIVIDE(z1, X2);
int x2 = z1 - z2*X2;
int x4 = FAST_INT_DIVIDE(z2, X3);
int x3 = z2 - x4*X3;
int x1odd = (x2 + x3 + x4 + oddBit) & 1;
int x1 = 2*x1h + x1odd;
int X = 2*sid + x1odd;
int sign = 1;
float2* mymom=momEven;
if (oddBit){
mymom = momOdd;
}
float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4;
float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4;
float2 STAPLE0, STAPLE1, STAPLE2, STAPLE3,STAPLE4, STAPLE5, STAPLE6, STAPLE7, STAPLE8;
float2 AH0, AH1, AH2, AH3, AH4;
int new_mem_idx;
SET_SU3_MATRIX(staple, 0);
for(i=0;i < num_paths; i++){
int nbr_oddbit = (oddBit^1 );
int new_x1 =x1;
int new_x2 =x2;
int new_x3 =x3;
int new_x4 =x4;
COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(dir, X);
//linka: current matrix
//linkb: the loaded matrix in this round
SET_UNIT_SU3_MATRIX(linka);
int* path = input_path + i*path_max_length;
int lnkdir;
int path0 = path[0];
if (GOES_FORWARDS(path0)){
lnkdir=path0;
}else{
lnkdir=OPP_DIR(path0);
COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(path0), new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}
int nbr_idx = new_mem_idx >>1;
if (nbr_oddbit){
LOAD_ODD_MATRIX( lnkdir, nbr_idx, LINKB);
}else{
LOAD_EVEN_MATRIX( lnkdir, nbr_idx, LINKB);
}
GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4);
RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb);
if (GOES_FORWARDS(path0)){
COPY_SU3_MATRIX(linkb, linka);
COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(path0, new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}else{
SU3_ADJOINT(linkb, linka);
}
for(j=1; j < length[i]; j++){
int lnkdir;
int pathj = path[j];
if (GOES_FORWARDS(pathj)){
lnkdir=pathj;
}else{
lnkdir=OPP_DIR(pathj);
COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(pathj), new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}
int nbr_idx = new_mem_idx >>1;
if (nbr_oddbit){
LOAD_ODD_MATRIX(lnkdir, nbr_idx, LINKB);
}else{
LOAD_EVEN_MATRIX(lnkdir, nbr_idx, LINKB);
}
GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4);
RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb);
if (GOES_FORWARDS(pathj)){
MULT_SU3_NN_TEST(linka, linkb);
COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(pathj, new_mem_idx);
nbr_oddbit = nbr_oddbit^1;
}else{
MULT_SU3_NA_TEST(linka, linkb);
}
}//j
SCALAR_MULT_ADD_SU3_MATRIX(staple, linka, path_coeff[i], staple);
}//i
//update mom
if (oddBit){
LOAD_ODD_MATRIX(dir, sid, LINKA);
}else{
LOAD_EVEN_MATRIX(dir, sid, LINKA);
}
GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, x1, x2, x3, x4);
RECONSTRUCT_MATRIX(dir, sid, sign, linka);
MULT_SU3_NN_TEST(linka, staple);
LOAD_ANTI_HERMITIAN(mymom, dir, sid, AH);
UNCOMPRESS_ANTI_HERMITIAN(ah, linkb);
SCALAR_MULT_SUB_SU3_MATRIX(linkb, linka, eb3, linka);
MAKE_ANTI_HERMITIAN(linka, ah);
WRITE_ANTI_HERMITIAN(mymom, dir, sid, AH);
return;
}
void
gauge_force_cuda(FullMom cudaMom, int dir, double eb3, FullGauge cudaSiteLink,
QudaGaugeParam* param, int** input_path,
int* length, void* path_coeff, int num_paths, int max_length)
{
int i, j;
//input_path
int bytes = num_paths*max_length* sizeof(int);
int* input_path_d;
cudaMalloc((void**)&input_path_d, bytes); checkCudaError();
cudaMemset(input_path_d, 0, bytes);checkCudaError();
int* input_path_h = (int*)malloc(bytes);
if (input_path_h == NULL){
printf("ERROR: malloc failed for input_path_h in function %s\n", __FUNCTION__);
exit(1);
}
memset(input_path_h, 0, bytes);
for(i=0;i < num_paths;i++){
for(j=0; j < length[i]; j++){
input_path_h[i*max_length + j] =input_path[i][j];
}
}
cudaMemcpy(input_path_d, input_path_h, bytes, cudaMemcpyHostToDevice); checkCudaError();
//length
int* length_d;
cudaMalloc((void**)&length_d, num_paths*sizeof(int)); checkCudaError();
cudaMemcpy(length_d, length, num_paths*sizeof(int), cudaMemcpyHostToDevice); checkCudaError();
//path_coeff
int gsize;
if (param->cuda_prec == QUDA_DOUBLE_PRECISION){
gsize = sizeof(double);
}else{
gsize= sizeof(float);
}
void* path_coeff_d;
cudaMalloc((void**)&path_coeff_d, num_paths*gsize); checkCudaError();
cudaMemcpy(path_coeff_d, path_coeff, num_paths*gsize, cudaMemcpyHostToDevice); checkCudaError();
//compute the gauge forces
int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3];
dim3 blockDim(BLOCK_DIM, 1,1);
dim3 gridDim(volume/blockDim.x, 1, 1);
dim3 halfGridDim(volume/(2*blockDim.x), 1, 1);
float2* momEven = (float2*)cudaMom.even;
float2* momOdd = (float2*)cudaMom.odd;
float4* linkEven = (float4*)cudaSiteLink.even;
float4* linkOdd = (float4*)cudaSiteLink.odd;
cudaBindTexture(0, siteLink0TexSingle, cudaSiteLink.even, cudaSiteLink.bytes);
cudaBindTexture(0, siteLink1TexSingle, cudaSiteLink.odd, cudaSiteLink.bytes);
parity_compute_gauge_force_kernel<0><<<halfGridDim, blockDim>>>(momEven, momOdd,
dir, eb3,
linkEven, linkOdd,
input_path_d, length_d, (float*)path_coeff_d,
num_paths);
//odd
  /* The reason we do not switch the even/odd function input parameters and the texture binding
   * is that we use the oddbit to decide where to load, in the kernel function
*/
parity_compute_gauge_force_kernel<1><<<halfGridDim, blockDim>>>(momEven, momOdd,
dir, eb3,
linkEven, linkOdd,
input_path_d, length_d, (float*)path_coeff_d,
num_paths);
cudaUnbindTexture(siteLink0TexSingle);
cudaUnbindTexture(siteLink1TexSingle);
checkCudaError();
cudaFree(input_path_d); checkCudaError();
free(input_path_h);
cudaFree(length_d);
cudaFree(path_coeff_d);
}
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef WRITE_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
|
d5fa2e91686672888f582ece5595274bfa0e1a78.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <string>
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif
int dynamic_base_func(int);
EXPORT int __host__ cuda_dynamic_host_func(int x)
{
return dynamic_base_func(x);
}
static __global__ void DetermineIfValidCudaDevice()
{
}
EXPORT int choose_cuda_device()
{
int nDevices = 0;
hipError_t err = hipGetDeviceCount(&nDevices);
if (err != hipSuccess) {
std::cerr << "Failed to retrieve the number of CUDA enabled devices"
<< std::endl;
return 1;
}
for (int i = 0; i < nDevices; ++i) {
hipDeviceProp_t prop;
hipError_t err = hipGetDeviceProperties(&prop, i);
if (err != hipSuccess) {
std::cerr << "Could not retrieve properties from CUDA device " << i
<< std::endl;
return 1;
}
if (prop.major >= 3) {
err = hipSetDevice(i);
if (err != hipSuccess) {
std::cout << "Could not select CUDA device " << i << std::endl;
} else {
return 0;
}
}
}
std::cout << "Could not find a CUDA enabled card supporting compute >=3.0"
<< std::endl;
return 1;
}
EXPORT void cuda_dynamic_lib_func()
{
hipLaunchKernelGGL(( DetermineIfValidCudaDevice), dim3(1), dim3(1), 0, 0, );
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::cerr << "DetermineIfValidCudaDevice [SYNC] failed: "
<< hipGetErrorString(err) << std::endl;
}
err = hipDeviceSynchronize();
if (err != hipSuccess) {
std::cerr << "DetermineIfValidCudaDevice [ASYNC] failed: "
<< hipGetErrorString(hipGetLastError()) << std::endl;
}
}
| d5fa2e91686672888f582ece5595274bfa0e1a78.cu |
#include <cuda.h>
#include <iostream>
#include <string>
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif
int dynamic_base_func(int);
EXPORT int __host__ cuda_dynamic_host_func(int x)
{
return dynamic_base_func(x);
}
static __global__ void DetermineIfValidCudaDevice()
{
}
EXPORT int choose_cuda_device()
{
int nDevices = 0;
cudaError_t err = cudaGetDeviceCount(&nDevices);
if (err != cudaSuccess) {
std::cerr << "Failed to retrieve the number of CUDA enabled devices"
<< std::endl;
return 1;
}
for (int i = 0; i < nDevices; ++i) {
cudaDeviceProp prop;
cudaError_t err = cudaGetDeviceProperties(&prop, i);
if (err != cudaSuccess) {
std::cerr << "Could not retrieve properties from CUDA device " << i
<< std::endl;
return 1;
}
if (prop.major >= 3) {
err = cudaSetDevice(i);
if (err != cudaSuccess) {
std::cout << "Could not select CUDA device " << i << std::endl;
} else {
return 0;
}
}
}
std::cout << "Could not find a CUDA enabled card supporting compute >=3.0"
<< std::endl;
return 1;
}
EXPORT void cuda_dynamic_lib_func()
{
DetermineIfValidCudaDevice<<<1, 1>>>();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::cerr << "DetermineIfValidCudaDevice [SYNC] failed: "
<< cudaGetErrorString(err) << std::endl;
}
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
std::cerr << "DetermineIfValidCudaDevice [ASYNC] failed: "
<< cudaGetErrorString(cudaGetLastError()) << std::endl;
}
}
|
1dec010116870d5c23210c08668507251e8bd1a8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* faster-cuda-aes.cu
*
* Created on: Sep 6, 2018
* Author: hb4ch
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "faster-cuda-aes.hpp"
#include "faster-table.hpp"
//copy block from inp to outp(1 block per thread)
__device__ void copy_block(uint8_t *inp, uint8_t *out, uint32_t offset){
//word size traversal
uint32_t *id = (uint32_t *)inp;
uint32_t *od = (uint32_t *)out;
for(int i = 0; i < 4; ++i){
od[offset/4 + i] = id[offset/4 + i];
}
}
//XOR round key with block(1 block per thread)
__device__ void add_round_key(uint8_t *block, uint8_t *key, uint32_t offset){
//word size traversal
uint32_t *b = (uint32_t *)block;
uint32_t *k = (uint32_t *)key;
for(int i = 0; i < 4; ++i){
b[offset/4 + i] = b[offset/4 + i] ^ k[i];
}
}
//substitute block bytes using faster_sbox (1 block per thread)
__device__ void sub_bytes(uint8_t *block, uint32_t offset){
for(int i = 0; i < 16; ++i){
block[offset + i] = faster_sbox[block[offset + i]];
}
}
//mix columns by taking linear combinations in the field (1 block per thread)
__device__ void mix_columns(uint8_t *block, uint32_t offset){
for(int i = 0; i < 4; ++i){ //iterate over columns
uint8_t a[4];
uint8_t b[4];
uint8_t h;
for(int j = 0; j < 4; ++j){
a[j] = block[offset + 4*i + j];
h = (uint8_t)((int8_t)a[j] >> 7);
b[j] = a[j] << 1;
b[j] ^= 0x1b & h;
}
block[offset + 4*i + 0] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1];
block[offset + 4*i + 1] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2];
block[offset + 4*i + 2] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3];
block[offset + 4*i + 3] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0];
}
}
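//(the linear combinations above are the standard AES MixColumns transform: each
// output byte r is 2*a[r] ^ 3*a[(r+1)%4] ^ a[(r+2)%4] ^ a[(r+3)%4], with the
// multiplication by 2 implemented as the xtime shift reduced by 0x1b in GF(2^8))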
//shift rows left by 0,1,2,3 bytes respectively (1 block per thread)
__device__ void shift_rows(uint8_t *sblock, uint32_t offset){
uint8_t tmp;
uint8_t *block = sblock + offset;
//row 0 remains unshifted
//shift row 1 left by 1
tmp = block[1];
block[1] = block[5];
block[5] = block[9];
block[9] = block[13];
block[13] = tmp;
	//shift row 2 left by 2
tmp = block[2];
block[2] = block[10];
block[10] = tmp;
tmp = block[6];
block[6] = block[14];
block[14] = tmp;
//shift row 3 left by 3
tmp = block[3];
block[3] = block[15];
block[15] = block[11];
block[11] = block[7];
block[7] = tmp;
}
//aes 128 encryption with expanded key supplied
//implemented as basic byte algorithm (naive)
//operates on one block per thread
__device__ void encrypt(uint8_t *block, uint8_t *rkey, uint32_t offset){
add_round_key(block, rkey, offset);
for(int i = 1; i < 10; ++i){
sub_bytes(block, offset);
shift_rows(block, offset);
mix_columns(block, offset);
add_round_key(block, rkey + 16*i, offset);
}
sub_bytes(block, offset);
shift_rows(block, offset);
add_round_key(block, rkey + 160, offset);
}
//aes 128 encryption with expanded key supplied
//implemented using 4 t-tables and faster_sbox
//(watch for endianness) (1 block per thread)
__device__ void encrypt_full_table(uint8_t *block, uint8_t *rkey, uint32_t offset){
uint8_t *b = (block + offset);
uint32_t *bword = (uint32_t *)(block + offset);
add_round_key(block, rkey, offset);
for(int i = 1; i < 10; ++i){
uint32_t *ckey = (uint32_t *)(rkey + 16*i);
uint32_t c1 = te0[b[0]] ^ te1[b[5]] ^ te2[b[10]] ^ te3[b[15]] ^ ckey[0];
uint32_t c2 = te0[b[4]] ^ te1[b[9]] ^ te2[b[14]] ^ te3[b[3]] ^ ckey[1];
uint32_t c3 = te0[b[8]] ^ te1[b[13]] ^ te2[b[2]] ^ te3[b[7]] ^ ckey[2];
uint32_t c4 = te0[b[12]] ^ te1[b[1]] ^ te2[b[6]] ^ te3[b[11]] ^ ckey[3];
bword[0] = c1;
bword[1] = c2;
bword[2] = c3;
bword[3] = c4;
}
sub_bytes(block, offset);
shift_rows(block, offset);
add_round_key(block, rkey + 160, offset);
}
//aes 128 encryption with expanded key supplied
//implemented using 1 t-tables (with rotation) and faster_sbox
//1 block per thread
__device__ void encrypt_one_table(uint8_t *block, uint8_t *rkey, uint32_t offset){
uint8_t *b = (block + offset);
uint32_t *bword = (uint32_t *)(block + offset);
add_round_key(block, rkey, offset);
for(int i = 1; i < 10; ++i){
uint32_t *ckey = (uint32_t *)(rkey + 16*i);
uint32_t c1 = te0[b[0]] ^ (te0[b[5]]<<8 | te0[b[5]]>>24) ^ (te0[b[10]]<<16 | te0[b[10]]>>16) ^ (te0[b[15]]<<24 | te0[b[15]]>>8) ^ ckey[0];
uint32_t c2 = te0[b[4]] ^ (te0[b[9]]<<8 | te0[b[9]]>>24) ^ (te0[b[14]]<<16 | te0[b[14]]>>16) ^ (te0[b[3]]<<24 | te0[b[3]]>>8) ^ ckey[1];
uint32_t c3 = te0[b[8]] ^ (te0[b[13]]<<8 | te0[b[13]]>>24) ^ (te0[b[2]]<<16 | te0[b[2]]>>16) ^ (te0[b[7]]<<24 | te0[b[7]]>>8) ^ ckey[2];
uint32_t c4 = te0[b[12]] ^ (te0[b[1]]<<8 | te0[b[1]]>>24) ^ (te0[b[6]]<<16 | te0[b[6]]>>16) ^ (te0[b[11]]<<24 | te0[b[11]]>>8) ^ ckey[3];
bword[0] = c1;
bword[1] = c2;
bword[2] = c3;
bword[3] = c4;
}
sub_bytes(block, offset);
shift_rows(block, offset);
add_round_key(block, rkey + 160, offset);
}
//perform aes 128 encryption with either a single table or 4 tables
//offset is the location of the working block in block
//boffset is the column in the working block (0 to 3)
//operates on 1 word per thread
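//note: the b[j], b[(j+5)&0xf], b[(j+10)&0xf], b[(j+15)&0xf] lookups below fold
//the ShiftRows step into the table indexing, so each output column is computed
//directly from the already-shifted state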
__device__ void encrypt_full_perword(uint8_t *block, uint8_t *rkey, uint32_t offset, uint8_t col){
uint8_t *b = block + offset;
uint32_t *bword = (uint32_t *)(block + offset); //start of the block
uint32_t *rwkey = (uint32_t *)rkey;
//perform add_round_key performed on single column
bword[col] = bword[col] ^ rwkey[col];
for(int i = 1; i < 10; ++i){
uint32_t *ckey = (uint32_t *)(rkey + 16*i);
int j = col * 4;
//multiple t table
// uint32_t c = te0[b[j]] ^ te1[b[(j+5)&0xf]] ^ te2[b[(j+10)&0xf]] ^ te3[b[(j+15)&0xf]] ^ ckey[col];
//single t table
uint32_t t1 = te0[b[j]];
uint32_t t2 = te0[b[(j+5)&0xf]];
uint32_t t3 = te0[b[(j+10)&0xf]];
uint32_t t4 = te0[b[(j+15)&0xf]];
uint32_t c = t1 ^ (t2<<8 | t2>>24) ^ (t3<<16 | t3>>16) ^ (t4<<24 | t4>>8) ^ ckey[col];
bword[col] = c;
}
//subbytes
uint8_t v1 = faster_sbox[b[(col*4 + 0)&0xf]];
uint8_t v2 = faster_sbox[b[(col*4 + 5)&0xf]];
uint8_t v3 = faster_sbox[b[(col*4 + 10)&0xf]];
uint8_t v4 = faster_sbox[b[(col*4 + 15)&0xf]];
	//__syncthreads(); should all move together so not a problem
b[col*4 + 0] = v1;
b[col*4 + 1] = v2;
b[col*4 + 2] = v3;
b[col*4 + 3] = v4;
//add last round key
bword[col] ^= rwkey[col + 40];
}
//perform counter mode encryption on block
//operates on a single word per thread with no memory fragmenting
__device__ void ctr_encrypt_perword(uint8_t *block, uint8_t *rkey, uint8_t *rseed, uint8_t *shmem, uint32_t toffset, uint8_t shblk, uint8_t col){
uint32_t *b = (uint32_t *)block;
uint32_t *r = (uint32_t *)rseed;
uint32_t *sh = (uint32_t *)shmem;
sh[shblk*4 + col] = r[col] + (col == 0)*(toffset / 16);
//perform encryption
encrypt_full_perword(shmem, rkey, shblk * 16, col);
//xor with data
b[toffset/4 + col] ^= sh[shblk*4 + col];
}
//perform counter mode encryption on block
//naive/ one-table/ or full table mode can be chosen by commenting/uncommenting
//operates on a single block per thread
__device__ void ctr_encrypt(uint8_t *block, uint8_t *rkey, uint8_t *rseed, uint32_t boffset, uint32_t toffset){
uint32_t *b = (uint32_t *)block;
uint32_t *r = (uint32_t *)rseed;
uint32_t addpt[4];
uint8_t *ctr_block = (uint8_t *)addpt;
//word size traversal
for(int i = 0; i < 4; ++i){
addpt[i] = r[i];
}
//add in counter value
addpt[0] = addpt[0] + toffset/16;
//encrypt_full_table(ctr_block, rkey, 0);
encrypt_one_table(ctr_block, rkey, 0);
//encrypt(ctr_block, rkey, 0);
//word size traversal
for(int i = 0; i < 4; ++i){
b[boffset/4 + i] ^= addpt[i];
}
}
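//example: with rseed = {s0,s1,s2,s3}, the block at byte offset 160 (block index
//160/16 = 10) is XORed with E_k({s0+10, s1, s2, s3}); counter word 0 advances by
//one per 16-byte block while the remaining seed words stay fixed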
//basic encryption kernel. Unused for ctr mode encryption
__global__ void encrypt_k(uint8_t *data, uint8_t *rkey, uint32_t numblock){
int bindex = blockIdx.x * blockDim.x + threadIdx.x;
int offset = bindex * 16;
if(bindex >= numblock) return;
encrypt_one_table(data, rkey, offset);
}
//Temp test helper (note: the self-XOR below zeroes each word rather than incrementing)
__device__ void inc_block(uint8_t *data, uint32_t offset){
//word size traversal
uint32_t *dat = (uint32_t *)data;
for(int i = 0; i < 4; ++i){
dat[offset/4 + i] ^= dat[offset/4 + i];
}
}
//perform counter encryption using a single thread per word with no memory fragmentation
__global__ void ctr_encrypt_nofrag_perword(uint8_t *data, uint8_t *rkey, uint32_t numblock){
uint32_t cindex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; //index into column
uint32_t bindex = cindex/4;
uint32_t offset = bindex * 16;
uint8_t shblk = bindex % 16;
uint8_t col = cindex % 4;
//memory for performing the encryption
__shared__ uint32_t shmem[64];
if(bindex >= numblock)return;
ctr_encrypt_perword(data, rkey, g_rseed, (uint8_t *)shmem, offset, shblk, col);
}
//perform ctr encryption with a single thread per block with no memory fragmentation
__global__ void ctr_encrypt_k_nofrag(uint8_t *data, uint8_t *rkey, uint32_t numblock){
int bindex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int toffset = bindex * 16;
if(bindex >= numblock) return;
ctr_encrypt(data, rkey, g_rseed, toffset, toffset);
}
//perform ctr encryption with a single thread per block with memory fragmentation to
//enable better memory access patterns
__global__ void ctr_encrypt_k_frag(uint8_t *data, uint8_t *rkey, uint32_t numblock){
__shared__ uint8_t smem[64 * 20];
uint32_t *swmem = (uint32_t *)smem;
uint32_t *wdata = (uint32_t *)data;
int bindex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int toffset = bindex * 16;
    int boffset = threadIdx.x * 20; //20-byte (5-word) stride per block for better memory access patterns
if(bindex >= numblock) return;
    for(int i = 0; i < 4; ++i){ //copy block data to shared memory
swmem[boffset/4 + i] = wdata[toffset/4 + i];
}
ctr_encrypt(smem, rkey, g_rseed, boffset, toffset);
    for(int i = 0; i < 4; ++i){ //copy block data back to global memory
wdata[toffset/4 + i] = swmem[boffset/4 + i];
}
}
| 1dec010116870d5c23210c08668507251e8bd1a8.cu | /*
* faster-cuda-aes.cu
*
* Created on: Sep 6, 2018
* Author: hb4ch
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "faster-cuda-aes.hpp"
#include "faster-table.hpp"
//copy block from inp to outp(1 block per thread)
__device__ void copy_block(uint8_t *inp, uint8_t *out, uint32_t offset){
//word size traversal
uint32_t *id = (uint32_t *)inp;
uint32_t *od = (uint32_t *)out;
for(int i = 0; i < 4; ++i){
od[offset/4 + i] = id[offset/4 + i];
}
}
//XOR round key with block(1 block per thread)
__device__ void add_round_key(uint8_t *block, uint8_t *key, uint32_t offset){
//word size traversal
uint32_t *b = (uint32_t *)block;
uint32_t *k = (uint32_t *)key;
for(int i = 0; i < 4; ++i){
b[offset/4 + i] = b[offset/4 + i] ^ k[i];
}
}
//substitute block int faster_sbox (1 block per thread)
__device__ void sub_bytes(uint8_t *block, uint32_t offset){
for(int i = 0; i < 16; ++i){
block[offset + i] = faster_sbox[block[offset + i]];
}
}
//mix columns by taking linear combinations in the field (1 block per thread)
__device__ void mix_columns(uint8_t *block, uint32_t offset){
for(int i = 0; i < 4; ++i){ //iterate over columns
uint8_t a[4];
uint8_t b[4];
uint8_t h;
for(int j = 0; j < 4; ++j){
a[j] = block[offset + 4*i + j];
h = (uint8_t)((int8_t)a[j] >> 7);
b[j] = a[j] << 1;
b[j] ^= 0x1b & h;
}
block[offset + 4*i + 0] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1];
block[offset + 4*i + 1] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2];
block[offset + 4*i + 2] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3];
block[offset + 4*i + 3] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0];
}
}
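//(the linear combinations above are the standard AES MixColumns transform: each
// output byte r is 2*a[r] ^ 3*a[(r+1)%4] ^ a[(r+2)%4] ^ a[(r+3)%4], with the
// multiplication by 2 implemented as the xtime shift reduced by 0x1b in GF(2^8))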
//shift rows left by 0,1,2,3 bytes respectively (1 block per thread)
__device__ void shift_rows(uint8_t *sblock, uint32_t offset){
uint8_t tmp;
uint8_t *block = sblock + offset;
//row 0 remains unshifted
//shift row 1 left by 1
tmp = block[1];
block[1] = block[5];
block[5] = block[9];
block[9] = block[13];
block[13] = tmp;
	//shift row 2 left by 2
tmp = block[2];
block[2] = block[10];
block[10] = tmp;
tmp = block[6];
block[6] = block[14];
block[14] = tmp;
//shift row 3 left by 3
tmp = block[3];
block[3] = block[15];
block[15] = block[11];
block[11] = block[7];
block[7] = tmp;
}
//aes 128 encryption with expanded key supplied
//implemented as basic byte algorithm (naive)
//operates on one block per thread
__device__ void encrypt(uint8_t *block, uint8_t *rkey, uint32_t offset){
add_round_key(block, rkey, offset);
for(int i = 1; i < 10; ++i){
sub_bytes(block, offset);
shift_rows(block, offset);
mix_columns(block, offset);
add_round_key(block, rkey + 16*i, offset);
}
sub_bytes(block, offset);
shift_rows(block, offset);
add_round_key(block, rkey + 160, offset);
}
//aes 128 encryption with expanded key supplied
//implemented using 4 t-tables and faster_sbox
//(watch for endianness) (1 block per thread)
__device__ void encrypt_full_table(uint8_t *block, uint8_t *rkey, uint32_t offset){
uint8_t *b = (block + offset);
uint32_t *bword = (uint32_t *)(block + offset);
add_round_key(block, rkey, offset);
for(int i = 1; i < 10; ++i){
uint32_t *ckey = (uint32_t *)(rkey + 16*i);
uint32_t c1 = te0[b[0]] ^ te1[b[5]] ^ te2[b[10]] ^ te3[b[15]] ^ ckey[0];
uint32_t c2 = te0[b[4]] ^ te1[b[9]] ^ te2[b[14]] ^ te3[b[3]] ^ ckey[1];
uint32_t c3 = te0[b[8]] ^ te1[b[13]] ^ te2[b[2]] ^ te3[b[7]] ^ ckey[2];
uint32_t c4 = te0[b[12]] ^ te1[b[1]] ^ te2[b[6]] ^ te3[b[11]] ^ ckey[3];
bword[0] = c1;
bword[1] = c2;
bword[2] = c3;
bword[3] = c4;
}
sub_bytes(block, offset);
shift_rows(block, offset);
add_round_key(block, rkey + 160, offset);
}
//aes 128 encryption with expanded key supplied
//implemented using 1 t-tables (with rotation) and faster_sbox
//1 block per thread
__device__ void encrypt_one_table(uint8_t *block, uint8_t *rkey, uint32_t offset){
uint8_t *b = (block + offset);
uint32_t *bword = (uint32_t *)(block + offset);
add_round_key(block, rkey, offset);
for(int i = 1; i < 10; ++i){
uint32_t *ckey = (uint32_t *)(rkey + 16*i);
uint32_t c1 = te0[b[0]] ^ (te0[b[5]]<<8 | te0[b[5]]>>24) ^ (te0[b[10]]<<16 | te0[b[10]]>>16) ^ (te0[b[15]]<<24 | te0[b[15]]>>8) ^ ckey[0];
uint32_t c2 = te0[b[4]] ^ (te0[b[9]]<<8 | te0[b[9]]>>24) ^ (te0[b[14]]<<16 | te0[b[14]]>>16) ^ (te0[b[3]]<<24 | te0[b[3]]>>8) ^ ckey[1];
uint32_t c3 = te0[b[8]] ^ (te0[b[13]]<<8 | te0[b[13]]>>24) ^ (te0[b[2]]<<16 | te0[b[2]]>>16) ^ (te0[b[7]]<<24 | te0[b[7]]>>8) ^ ckey[2];
uint32_t c4 = te0[b[12]] ^ (te0[b[1]]<<8 | te0[b[1]]>>24) ^ (te0[b[6]]<<16 | te0[b[6]]>>16) ^ (te0[b[11]]<<24 | te0[b[11]]>>8) ^ ckey[3];
bword[0] = c1;
bword[1] = c2;
bword[2] = c3;
bword[3] = c4;
}
sub_bytes(block, offset);
shift_rows(block, offset);
add_round_key(block, rkey + 160, offset);
}
//perform aes 128 encryption with either a single table or 4 tables
//offset is the location of the working block in block
//boffset is the column in the working block (0 to 3)
//operates on 1 word per thread
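//note: the b[j], b[(j+5)&0xf], b[(j+10)&0xf], b[(j+15)&0xf] lookups below fold
//the ShiftRows step into the table indexing, so each output column is computed
//directly from the already-shifted state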
__device__ void encrypt_full_perword(uint8_t *block, uint8_t *rkey, uint32_t offset, uint8_t col){
uint8_t *b = block + offset;
uint32_t *bword = (uint32_t *)(block + offset); //start of the block
uint32_t *rwkey = (uint32_t *)rkey;
//perform add_round_key performed on single column
bword[col] = bword[col] ^ rwkey[col];
for(int i = 1; i < 10; ++i){
uint32_t *ckey = (uint32_t *)(rkey + 16*i);
int j = col * 4;
//multiple t table
// uint32_t c = te0[b[j]] ^ te1[b[(j+5)&0xf]] ^ te2[b[(j+10)&0xf]] ^ te3[b[(j+15)&0xf]] ^ ckey[col];
//single t table
uint32_t t1 = te0[b[j]];
uint32_t t2 = te0[b[(j+5)&0xf]];
uint32_t t3 = te0[b[(j+10)&0xf]];
uint32_t t4 = te0[b[(j+15)&0xf]];
uint32_t c = t1 ^ (t2<<8 | t2>>24) ^ (t3<<16 | t3>>16) ^ (t4<<24 | t4>>8) ^ ckey[col];
bword[col] = c;
}
//subbytes
uint8_t v1 = faster_sbox[b[(col*4 + 0)&0xf]];
uint8_t v2 = faster_sbox[b[(col*4 + 5)&0xf]];
uint8_t v3 = faster_sbox[b[(col*4 + 10)&0xf]];
uint8_t v4 = faster_sbox[b[(col*4 + 15)&0xf]];
	//__syncthreads(); should all move together so not a problem
b[col*4 + 0] = v1;
b[col*4 + 1] = v2;
b[col*4 + 2] = v3;
b[col*4 + 3] = v4;
//add last round key
bword[col] ^= rwkey[col + 40];
}
//perform counter mode encryption on block
//operates on a single word per thread with no memory fragmenting
__device__ void ctr_encrypt_perword(uint8_t *block, uint8_t *rkey, uint8_t *rseed, uint8_t *shmem, uint32_t toffset, uint8_t shblk, uint8_t col){
uint32_t *b = (uint32_t *)block;
uint32_t *r = (uint32_t *)rseed;
uint32_t *sh = (uint32_t *)shmem;
sh[shblk*4 + col] = r[col] + (col == 0)*(toffset / 16);
//perform encryption
encrypt_full_perword(shmem, rkey, shblk * 16, col);
//xor with data
b[toffset/4 + col] ^= sh[shblk*4 + col];
}
//perform counter mode encryption on block
//naive/ one-table/ or full table mode can be chosen by commenting/uncommenting
//operates on a single block per thread
__device__ void ctr_encrypt(uint8_t *block, uint8_t *rkey, uint8_t *rseed, uint32_t boffset, uint32_t toffset){
uint32_t *b = (uint32_t *)block;
uint32_t *r = (uint32_t *)rseed;
uint32_t addpt[4];
uint8_t *ctr_block = (uint8_t *)addpt;
//word size traversal
for(int i = 0; i < 4; ++i){
addpt[i] = r[i];
}
//add in counter value
addpt[0] = addpt[0] + toffset/16;
//encrypt_full_table(ctr_block, rkey, 0);
encrypt_one_table(ctr_block, rkey, 0);
//encrypt(ctr_block, rkey, 0);
//word size traversal
for(int i = 0; i < 4; ++i){
b[boffset/4 + i] ^= addpt[i];
}
}
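//example: with rseed = {s0,s1,s2,s3}, the block at byte offset 160 (block index
//160/16 = 10) is XORed with E_k({s0+10, s1, s2, s3}); counter word 0 advances by
//one per 16-byte block while the remaining seed words stay fixed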
//basic encryption kernel. Unused for ctr mode encryption
__global__ void encrypt_k(uint8_t *data, uint8_t *rkey, uint32_t numblock){
int bindex = blockIdx.x * blockDim.x + threadIdx.x;
int offset = bindex * 16;
if(bindex >= numblock) return;
encrypt_one_table(data, rkey, offset);
}
//Temp test helper (note: the self-XOR below zeroes each word rather than incrementing)
__device__ void inc_block(uint8_t *data, uint32_t offset){
//word size traversal
uint32_t *dat = (uint32_t *)data;
for(int i = 0; i < 4; ++i){
dat[offset/4 + i] ^= dat[offset/4 + i];
}
}
//perform counter encryption using a single thread per word with no memory fragmentation
__global__ void ctr_encrypt_nofrag_perword(uint8_t *data, uint8_t *rkey, uint32_t numblock){
uint32_t cindex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; //index into column
uint32_t bindex = cindex/4;
uint32_t offset = bindex * 16;
uint8_t shblk = bindex % 16;
uint8_t col = cindex % 4;
//memory for performing the encryption
__shared__ uint32_t shmem[64];
if(bindex >= numblock)return;
ctr_encrypt_perword(data, rkey, g_rseed, (uint8_t *)shmem, offset, shblk, col);
}
//perform ctr encryption with a single thread per block with no memory fragmentation
__global__ void ctr_encrypt_k_nofrag(uint8_t *data, uint8_t *rkey, uint32_t numblock){
int bindex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int toffset = bindex * 16;
if(bindex >= numblock) return;
ctr_encrypt(data, rkey, g_rseed, toffset, toffset);
}
//perform ctr encryption with a single thread per block with memory fragmentation to
//enable better memory access patterns
__global__ void ctr_encrypt_k_frag(uint8_t *data, uint8_t *rkey, uint32_t numblock){
__shared__ uint8_t smem[64 * 20];
uint32_t *swmem = (uint32_t *)smem;
uint32_t *wdata = (uint32_t *)data;
int bindex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int toffset = bindex * 16;
    int boffset = threadIdx.x * 20; //20-byte (5-word) stride per block for better memory access patterns
if(bindex >= numblock) return;
    for(int i = 0; i < 4; ++i){ //copy block data to shared memory
swmem[boffset/4 + i] = wdata[toffset/4 + i];
}
ctr_encrypt(smem, rkey, g_rseed, boffset, toffset);
    for(int i = 0; i < 4; ++i){ //copy block data back to global memory
wdata[toffset/4 + i] = swmem[boffset/4 + i];
}
}
|
08ee6d0de02cdb3474c8e51a0f7d4114cb67e959.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef TEXTURE_GPUNUFFT_KERNELS_H
#define TEXTURE_GPUNUFFT_KERNELS_H
#include "gpuNUFFT_kernels.hpp"
#include "../std_gpuNUFFT_kernels.cu"
#include "cuda_utils.cuh"
// ----------------------------------------------------------------------------
// convolutionKernel: NUFFT^H kernel
//
// Performs the gpuNUFFT step by convolution of the sample points with the
// interpolation function and resampling onto the grid. Basic concept based on
// Zwart et al.
//
// parameters:
// * data : complex input sample points
// * crds : coordinates of data points (x,y,z)
// * gdata : output grid data
// * sectors : mapping of sample indices according to each sector
// * sector_centers : coordinates (x,y,z) of sector centers
// * N               : number of sectors to process
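// Sketch of the per-sample gridding step (computeTextureLookup is assumed to
// return the precomputed interpolation kernel value for squared distances
// normalized by the kernel radius): each sample is spread onto its neighboring
// grid cells as sdata[ind] += kernel(dx,dy,dz) * data[sample], accumulated
// atomically in shared memory before the sector is flushed to gdata.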
__device__ void textureConvolutionFunction(int *sec, int sec_max,
int sec_offset, DType2 *sdata,
DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers)
{
// start convolution
int ind, x, y, z;
int imin, imax, jmin, jmax, kmin, kmax;
DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz;
__shared__ IndType3 center;
center.x = sector_centers[sec[threadIdx.x] * 3];
center.y = sector_centers[sec[threadIdx.x] * 3 + 1];
center.z = sector_centers[sec[threadIdx.x] * 3 + 2];
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
// loop over all data points of the current sector, and check if grid position
// lies inside
// affected region, if so, add data point weighted to grid position value
while (data_cnt < sec_max)
{
DType3 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
data_point.z = crds[data_cnt + 2 * GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
kz = mapKSpaceToGrid(data_point.z, GI.gridDims.z, center.z,
GI.sector_offset);
set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius);
// grid this point onto its cartesian points neighbors
for (int k = kmin; k <= kmax; k++)
{
kz = mapGridToKSpace(k, GI.gridDims.z, center.z, GI.sector_offset);
dz_sqr = (kz - data_point.z) * GI.aniso_z_scale;
dz_sqr *= dz_sqr;
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv,
dz_sqr * GI.radiusSquared_inv);
ind = getIndex(i, j, k, GI.sector_pad_width);
// multiply data by current kernel val
// grid complex or scalar
atomicAdd(&(sdata[ind].x),
val *
tex1Dfetch(texDATA, data_cnt).x);
atomicAdd(&(sdata[ind].y),
val *
tex1Dfetch(texDATA, data_cnt).y);
} // x
} // y
} // z
data_cnt = data_cnt + blockDim.x;
} // grid points per sector
// write shared data to output grid
__syncthreads();
// int sector_ind_offset = sec * GI.sector_dim;
__shared__ int sector_ind_offset;
sector_ind_offset =
computeXYZ2Lin(center.x - GI.sector_offset, center.y - GI.sector_offset,
center.z - GI.sector_offset, GI.gridDims);
// each thread writes one position from shared mem to global mem
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
getCoordsFromIndex(s_ind, &x, &y, &z, GI.sector_pad_width);
if (isOutlier(x, y, z, center.x, center.y, center.z, GI.gridDims,
GI.sector_offset))
// calculate opposite index
ind = computeXYZ2Lin(
calculateOppositeIndex(x, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(y, center.y, GI.gridDims.y, GI.sector_offset),
calculateOppositeIndex(z, center.z, GI.gridDims.z, GI.sector_offset),
GI.gridDims);
else
ind = sector_ind_offset +
computeXYZ2Lin(x, y, z, GI.gridDims); // index in output grid
atomicAdd(&(gdata[ind].x), sdata[s_ind].x); // Re
atomicAdd(&(gdata[ind].y), sdata[s_ind].y); // Im
// reset shared mem
sdata[s_ind].x = (DType)0.0;
sdata[s_ind].y = (DType)0.0;
}
__syncthreads();
}
__global__ void textureConvolutionKernel(DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
sdata[s_ind].x = (DType)0.0; // Re
sdata[s_ind].y = (DType)0.0; // Im
}
__syncthreads();
__shared__ int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
while (sec[threadIdx.x] < N)
{
__shared__ int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureConvolutionFunction(sec, data_max, 0, sdata, data, crds, gdata,
sectors, sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sec < sector_count
}
__global__ void balancedTextureConvolutionKernel(
DType2 *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
sdata[s_ind].x = (DType)0.0; // Re
sdata[s_ind].y = (DType)0.0; // Im
}
__syncthreads();
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureConvolutionFunction(sec, data_max,
sector_processing_order[sec_cnt].y, sdata, data,
crds, gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sec < sector_count
}
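// Load-balancing note: sector_processing_order holds (sector index, sample
// offset) pairs, so a densely populated sector is split into chunks of at most
// MAXIMUM_PAYLOAD samples and each chunk is gridded in a separate pass.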
// ----------------------------------------------------------------------------
// convolutionKernel: NUFFT^H kernel
//
// Performs the gpuNUFFT step by convolution of the sample points with the
// interpolation function and resampling onto the grid. Basic concept based on
// Zwart et al.
//
// parameters:
// * data : complex input sample points
// * crds : coordinates of data points (x,y,z)
// * gdata : output grid data
// * sectors : mapping of sample indices according to each sector
// * sector_centers : coordinates (x,y,z) of sector centers
// * N               : number of sectors to process
__device__ void textureConvolutionFunction2D(DType2 *sdata, int *sec,
int sec_max, int sec_offset,
DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers)
{
// start convolution
int ind, x, y;
int imin, imax, jmin, jmax;
DType dx_sqr, dy_sqr, val, ix, jy;
__shared__ IndType2 center;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
// loop over all data points of the current sector, and check if grid position
// lies inside
// affected region, if so, add data point weighted to grid position value
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// grid this point onto its cartesian points neighbors
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// Calculate Separable Filters
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
ind = getIndex2D(i, j, GI.sector_pad_width);
// multiply data by current kernel val
// grid complex or scalar
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
atomicAdd(&(sdata[ind + c * GI.sector_dim].x),
val * tex1Dfetch(texDATA, data_cnt + c * GI.data_count).x);
atomicAdd(&(sdata[ind + c * GI.sector_dim].y),
val * tex1Dfetch(texDATA, data_cnt + c * GI.data_count).y);
}
} // x
} // y
data_cnt = data_cnt + blockDim.x;
} // grid points per sector
// write shared data to output grid
__syncthreads();
// int sector_ind_offset = sec * GI.sector_dim;
__shared__ int sector_ind_offset;
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
// each thread writes one position from shared mem to global mem
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
getCoordsFromIndex2D(s_ind, &x, &y, GI.sector_pad_width);
if (isOutlier2D(x, y, center.x, center.y, GI.gridDims, GI.sector_offset))
// calculate opposite index
ind = computeXY2Lin(
calculateOppositeIndex(x, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(y, center.y, GI.gridDims.y, GI.sector_offset),
GI.gridDims);
else
ind = sector_ind_offset +
computeXY2Lin(x, y, GI.gridDims); // index in output grid
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
atomicAdd(&(gdata[ind + c * GI.gridDims_count].x),
sdata[s_ind + c * GI.sector_dim].x); // Re
atomicAdd(&(gdata[ind + c * GI.gridDims_count].y),
sdata[s_ind + c * GI.sector_dim].y); // Im
// reset shared mem
sdata[s_ind + c * GI.sector_dim].x = (DType)0.0;
sdata[s_ind + c * GI.sector_dim].y = (DType)0.0;
}
}
}
__global__ void textureConvolutionKernel2D(DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
sdata[s_ind + c * GI.sector_dim].x = 0.0f; // Re
sdata[s_ind + c * GI.sector_dim].y = 0.0f; // Im
}
}
__syncthreads();
__shared__ int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
while (sec[threadIdx.x] < N)
{
__shared__ int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureConvolutionFunction2D(sdata, sec, data_max, 0, data, crds, gdata,
sectors, sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sec < sector_count
}
__global__ void balancedTextureConvolutionKernel2D(
DType2 *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
sdata[s_ind + c * GI.sector_dim].x = 0.0f; // Re
sdata[s_ind + c * GI.sector_dim].y = 0.0f; // Im
}
}
__syncthreads();
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]]
+ sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureConvolutionFunction2D(sdata, sec, data_max,
sector_processing_order[sec_cnt].y, data, crds,
gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sec < sector_count
}
void performTextureConvolution(DType2 *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d, IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
long shared_mem_size =
(gi_host->sector_dim) * sizeof(DType2) * gi_host->n_coils_cc;
int thread_size = THREAD_BLOCK_SIZE;
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, 1));
if (DEBUG)
{
printf("adjoint texture convolution requires %ld bytes of shared memory!\n",
shared_mem_size);
printf("grid dim %u, block dim %u \n", grid_dim.x, block_dim.x);
}
if (gi_host->is2Dprocessing)
{
dim3 block_dim(
64, 1,
DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 4 : gi_host->n_coils_cc));
hipLaunchKernelGGL(( textureConvolutionKernel2D) , dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
gi_host->sector_count);
}
else
hipLaunchKernelGGL(( textureConvolutionKernel) , dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
gi_host->sector_count);
if (DEBUG)
printf("...finished with: %s\n", hipGetErrorString(hipGetLastError()));
}
void performTextureConvolution(DType2 *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d,
IndType2 *sector_processing_order_d,
IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
long shared_mem_size =
(gi_host->sector_dim) * sizeof(DType2) * gi_host->n_coils_cc;
int thread_size = THREAD_BLOCK_SIZE;
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, 1));
if (DEBUG)
{
printf("adjoint balanced texture convolution requires %ld bytes of shared "
"memory!\n",
shared_mem_size);
printf("grid dim %u, block dim %u \n", grid_dim.x, block_dim.x);
}
if (gi_host->is2Dprocessing)
{
dim3 block_dim(
64, 1,
DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 4 : gi_host->n_coils_cc));
//printf("block dims: %u %u %u!\n", block_dim.x, block_dim.y, block_dim.z);
hipLaunchKernelGGL(( balancedTextureConvolutionKernel2D)
, dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d,
sector_centers_d, gi_host->sectorsToProcess);
}
else
hipLaunchKernelGGL(( balancedTextureConvolutionKernel) , dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d,
sector_centers_d, gi_host->sectorsToProcess);
if (DEBUG)
printf("...finished with: %s\n", hipGetErrorString(hipGetLastError()));
}
// ----------------------------------------------------------------------------
// forwardConvolutionKernel: NUFFT kernel
//
// Performs the inverse gpuNUFFT step by convolution of the grid points with the
// interpolation function and resampling onto the trajectory.
//
// parameters:
// * data : complex output sample points
// * crds : coordinates of data points (x,y,z)
// * gdata : input grid data
// * sectors : mapping of sample indices according to each sector
// * sector_centers : coordinates (x,y,z) of sector centers
// * N               : number of sectors to process
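// Sketch of the degridding step: each sector's grid patch is first staged into
// shared memory via texture fetches, then every sample gathers
// data[sample] += kernel(dx,dy,dz) * gdata_cache[ind] over its neighborhood and
// the per-thread partial sum is committed with atomicAdd.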
__device__ void
textureForwardConvolutionFunction(long int *sec, long int sec_max, long int sec_offset,
DType2 *sdata, CufftType *gdata_cache,
DType2 *data, DType *crds, CufftType *gdata,
IndType *sectors, IndType *sector_centers)
{
int ind, imin, imax, jmin, jmax, kmin, kmax, ii, jj, kk;
DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz;
__shared__ IndType3 center;
center.x = sector_centers[sec[threadIdx.x] * 3];
center.y = sector_centers[sec[threadIdx.x] * 3 + 1];
center.z = sector_centers[sec[threadIdx.x] * 3 + 2];
__shared__ long int sector_ind_offset;
sector_ind_offset =
computeXYZ2Lin(center.x - GI.sector_offset, center.y - GI.sector_offset,
center.z - GI.sector_offset, GI.gridDims);
// init sector cache
// preload sector grid data into cache
for (long int ind = threadIdx.x; ind < GI.sector_dim; ind += blockDim.x)
{
long int grid_index;
getCoordsFromIndex(ind, &ii, &jj, &kk, GI.sector_pad_width);
if (isOutlier(ii, jj, kk, center.x, center.y, center.z, GI.gridDims,
GI.sector_offset))
// calculate opposite index
grid_index = computeXYZ2Lin(
calculateOppositeIndex(ii, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(jj, center.y, GI.gridDims.y, GI.sector_offset),
calculateOppositeIndex(kk, center.z, GI.gridDims.z, GI.sector_offset),
GI.gridDims);
else
grid_index = (sector_ind_offset + computeXYZ2Lin(ii, jj, kk, GI.gridDims));
gdata_cache[ind].x = tex1Dfetch(texGDATA, grid_index).x;
gdata_cache[ind].y = tex1Dfetch(texGDATA, grid_index).y;
}
__syncthreads();
// Grid Points over Threads
long int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
while (data_cnt < sec_max)
{
DType3 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
data_point.z = crds[data_cnt + 2 * GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
kz = mapKSpaceToGrid(data_point.z, GI.gridDims.z, center.z,
GI.sector_offset);
set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
for (int k = kmin; k <= kmax; k++)
{
kz = mapGridToKSpace(k, GI.gridDims.z, center.z, GI.sector_offset);
dz_sqr = (kz - data_point.z) * GI.aniso_z_scale;
dz_sqr *= dz_sqr;
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv,
dz_sqr * GI.radiusSquared_inv);
ind = getIndex(i, j, k, GI.sector_pad_width);
sdata[threadIdx.x].x += gdata_cache[ind].x * val;
sdata[threadIdx.x].y += gdata_cache[ind].y * val;
} // x loop
} // y loop
} // z loop
atomicAdd(&(data[data_cnt].x), sdata[threadIdx.x].x);
atomicAdd(&(data[data_cnt].y), sdata[threadIdx.x].y);
data_cnt = data_cnt + blockDim.x;
sdata[threadIdx.x].x = (DType)0.0; // Re
sdata[threadIdx.x].y = (DType)0.0; // Im
} // data points per sector
}
__global__ void textureForwardConvolutionKernel(CufftType *data, DType *crds,
CufftType *gdata,
IndType *sectors,
IndType *sector_centers, int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x];
__shared__ long int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
// init shared memory
shared_out_data[threadIdx.x].x = (DType)0.0; // Re
shared_out_data[threadIdx.x].y = (DType)0.0; // Im
__syncthreads();
// start convolution
while (sec[threadIdx.x] < N)
{
__shared__ long int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureForwardConvolutionFunction(sec, data_max, 0, shared_out_data,
gdata_cache, data, crds, gdata, sectors,
sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x];
long int sec_cnt = blockIdx.x;
__shared__ long int sec[THREAD_BLOCK_SIZE];
// init shared memory
shared_out_data[threadIdx.x].x = (DType)0.0; // Re
shared_out_data[threadIdx.x].y = (DType)0.0; // Im
__syncthreads();
// start convolution
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ long int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction(
sec, data_max, sector_processing_order[sec_cnt].y, shared_out_data,
gdata_cache, data, crds, gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sector check
}
__device__ void
textureForwardConvolutionFunction2D(int *sec, int sec_max, int sec_offset,
DType2 *sdata, CufftType *gdata_cache,
DType2 *data, DType *crds, CufftType *gdata,
IndType *sectors, IndType *sector_centers)
{
int ind, imin, imax, jmin, jmax, ii, jj;
DType val, ix, jy;
__shared__ IndType2 center;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
__shared__ int sector_ind_offset;
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
// init sector cache
// preload sector grid data into cache
for (int ind = threadIdx.x; ind < GI.sector_dim; ind += blockDim.x)
{
int grid_index;
getCoordsFromIndex2D(ind, &ii, &jj, GI.sector_pad_width);
// multiply data by current kernel val
// grid complex or scalar
if (isOutlier2D(ii, jj, center.x, center.y, GI.gridDims, GI.sector_offset))
// calculate opposite index
grid_index = getIndex2D(
calculateOppositeIndex(ii, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(jj, center.y, GI.gridDims.y, GI.sector_offset),
GI.gridDims.x);
else
grid_index = (sector_ind_offset + getIndex2D(ii, jj, GI.gridDims.x));
for (int c = 0; c < GI.n_coils_cc; c++)
{
gdata_cache[ind + c * GI.sector_dim].x =
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).x;
gdata_cache[ind + c * GI.sector_dim].y =
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).y;
}
}
__syncthreads();
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
DType dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
DType dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// calc as separable filter
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
ind = getIndex2D(i, j, GI.sector_pad_width);
for (int c = 0; c < GI.n_coils_cc; c++)
{
sdata[threadIdx.x + c * blockDim.x].x +=
gdata_cache[ind + c * GI.sector_dim].x * val;
sdata[threadIdx.x + c * blockDim.x].y +=
gdata_cache[ind + c * GI.sector_dim].y * val;
}
} // x loop
} // y loop
for (int c = 0; c < GI.n_coils_cc; c++)
{
atomicAdd(&(data[data_cnt + c * GI.data_count].x),
sdata[threadIdx.x + c * blockDim.x].x);
atomicAdd(&(data[data_cnt + c * GI.data_count].y),
sdata[threadIdx.x + c * blockDim.x].y);
sdata[threadIdx.x + c * blockDim.x].x = (DType)0.0; // Re
sdata[threadIdx.x + c * blockDim.x].y = (DType)0.0; // Im
}
data_cnt = data_cnt + blockDim.x;
} // data points per sector
}
__device__ void textureForwardConvolutionFunction22D(
int *sec, int sec_max, int sec_offset, DType2 *data,
DType *crds, CufftType *gdata, IndType *sectors, IndType *sector_centers)
{
int imin, imax, jmin, jmax, i, j;
DType val, ix, jy;
IndType2 center;
int sector_ind_offset;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
__syncthreads();
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
int rangeX = imax - imin + 1;
int rangeY = jmax - jmin + 1;
int idx = threadIdx.y;
int grid_index;
while (idx < (rangeX * rangeY))
{
getCoordsFromIndex2D(idx, &i, &j, rangeX, rangeY);
i += imin;
j += jmin;
if (j <= jmax && j >= jmin)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
DType dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
if (i <= imax && i >= imin)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
DType dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// calc as separable filter
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
if (isOutlier2D(i, j, center.x, center.y, GI.gridDims,
GI.sector_offset))
// calculate opposite index
grid_index =
getIndex2D(calculateOppositeIndex(i, center.x, GI.gridDims.x,
GI.sector_offset),
calculateOppositeIndex(j, center.y, GI.gridDims.y,
GI.sector_offset),
GI.gridDims.x);
else
grid_index = (sector_ind_offset + getIndex2D(i, j, GI.gridDims.x));
for (int c = 0; c < GI.n_coils_cc; c++)
{
atomicAdd(&(data[data_cnt + c * GI.data_count].x), tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).x * val);
atomicAdd(&(data[data_cnt + c * GI.data_count].y), tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).y * val);
}
} // x if
} // y if
idx = idx + blockDim.y;
}
data_cnt = data_cnt + blockDim.x;
} // data points per sector
}
__device__ void textureForwardConvolutionFunction32D(
int *sec, int sec_max, int sec_offset, DType *cache, DType2 *data,
DType *crds, CufftType *gdata, IndType *sectors, IndType *sector_centers)
{
int imin, imax, jmin, jmax, i, j;
DType val, ix, jy;
__shared__ IndType2 center;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
__shared__ int sector_ind_offset;
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
int grid_index;
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
int idx = threadIdx.y;
getCoordsFromIndex2D(idx, &i, &j, GI.kernel_width + 1, GI.kernel_width + 1);
i += imin;
j += jmin;
if (j <= jmax && j >= jmin)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
DType dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
if (i <= imax && i >= imin)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
DType dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// calc as separable filter
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] = val;
if (isOutlier2D(i, j, center.x, center.y, GI.gridDims,
GI.sector_offset))
// calculate opposite index
grid_index =
getIndex2D(calculateOppositeIndex(i, center.x, GI.gridDims.x,
GI.sector_offset),
calculateOppositeIndex(j, center.y, GI.gridDims.y,
GI.sector_offset),
GI.gridDims.x);
else
grid_index = (sector_ind_offset + getIndex2D(i, j, GI.gridDims.x));
for (int c = 0; c < GI.n_coils_cc; c++)
{
atomicAdd(
&(data[data_cnt + c * GI.data_count].x),
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] *
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).x);
atomicAdd(
&(data[data_cnt + c * GI.data_count].y),
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] *
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).y);
}
} // x if
} // y if
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] = 0;
data_cnt = data_cnt + blockDim.x;
} // data points per sector
}
__global__ void textureForwardConvolutionKernel2D(CufftType *data, DType *crds,
CufftType *gdata,
IndType *sectors,
IndType *sector_centers,
int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x * GI.n_coils_cc];
__shared__ int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
// init shared memory
for (int c = 0; c < GI.n_coils_cc; c++)
{
shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f; // Re
shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f; // Im
}
__syncthreads();
// start convolution
while (sec[threadIdx.x] < N)
{
__shared__ int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureForwardConvolutionFunction2D(sec, data_max, 0, shared_out_data,
gdata_cache, data, crds, gdata, sectors,
sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel2D(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x * GI.n_coils_cc];
__shared__ int sec[THREAD_BLOCK_SIZE];
// init shared memory
for (int c = 0; c < GI.n_coils_cc; c++)
{
shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f; // Re
shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f; // Im
}
__syncthreads();
// start convolution
for (int sec_cnt = blockIdx.x; sec_cnt < N; sec_cnt += gridDim.x)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction2D(
sec, data_max, sector_processing_order[sec_cnt].y, shared_out_data,
gdata_cache, data, crds, gdata, sectors, sector_centers);
__syncthreads();
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel22D(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
// init shared memory
// start convolution
while (sec_cnt < N)
{
int data_max;
if (threadIdx.y == 0)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
}
__syncthreads();
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]]
+ sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction22D(
sec, data_max, sector_processing_order[sec_cnt].y, data, crds,
gdata, sectors, sector_centers);
sec_cnt = sec_cnt + gridDim.x;
__syncthreads();
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel32D(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ DType shared_cache[]; // externally managed shared memory
DType *cache = (DType *)&shared_cache[0];
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
// init shared memory
cache[threadIdx.x * blockDim.y + threadIdx.y] = (DType)0.0;
__syncthreads();
// start convolution
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction32D(
sec, data_max, sector_processing_order[sec_cnt].y, cache, data, crds,
gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sector check
}
void performTextureForwardConvolution(CufftType *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d,
IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
int thread_size = 192;
long shared_mem_size = (thread_size + gi_host->sector_dim) *
gi_host->n_coils_cc * sizeof(CufftType);
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, thread_size));
if (DEBUG)
printf("texture forward convolution requires %ld bytes of shared memory!\n",
shared_mem_size);
if (gi_host->is2Dprocessing)
{
// dim3 block_dim(thread_size, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 1
// : gi_host->n_coils_cc));
dim3 block_dim(thread_size, 1, 1); // DEFAULT_VALUE(gi_host->n_coils_cc > 4
// ? 1 : gi_host->n_coils_cc));
    hipLaunchKernelGGL(textureForwardConvolutionKernel2D,
                       dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
                       data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
                       gi_host->sector_count);
}
else
hipLaunchKernelGGL(( textureForwardConvolutionKernel) , dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
gi_host->sector_count);
}
void performTextureForwardConvolution(CufftType *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d,
IndType2 *sector_processing_order_d,
IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
int thread_size = THREAD_BLOCK_SIZE;
long shared_mem_size = (thread_size + gi_host->sector_dim) *
gi_host->n_coils_cc * sizeof(CufftType);
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, thread_size));
if (DEBUG)
printf("balanced texture forward convolution requires %ld bytes of shared "
"memory!\n",
shared_mem_size);
if (gi_host->is2Dprocessing)
{
bool useV2cached = false;
if (useV2cached)
{
int thread_size = 32;
int threadY = (gi_host->kernel_width + 1) * (gi_host->kernel_width + 1);
long shared_mem_size =
(threadY * thread_size) * sizeof(DType);
grid_dim = dim3(getOptimalGridDim(gi_host->sector_count, 1));
block_dim = getOptimal2DBlockDim(thread_size, threadY);
if (DEBUG)
{
printf("balanced texture forward convolution 2 (2d) requires %ld bytes "
"of shared memory!\n",
shared_mem_size);
printf("block dims: %u %u %u!\n", block_dim.x, block_dim.y, block_dim.z);
printf("grid dims: %u %u %u!\n", grid_dim.x, grid_dim.y, grid_dim.z);
}
hipLaunchKernelGGL(( balancedTextureForwardConvolutionKernel32D), dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d, sector_centers_d, gi_host->sectorsToProcess);
}
else
{
int thread_size = 32;
long shared_mem_size =
(gi_host->kernel_widthSquared * thread_size) * sizeof(DType);
grid_dim = dim3(getOptimalGridDim(gi_host->sector_count, 1));
//TODO maybe it's better to round kwSqrd to the next multiple of 2
block_dim = getOptimal2DBlockDim(thread_size, gi_host->kernel_widthSquared);
if (DEBUG)
{
printf("balanced texture forward convolution 2 (2d) requires %ld bytes "
"of shared memory!\n",
shared_mem_size);
printf("grid dims: %u %u %u!\n", grid_dim.x, grid_dim.y, grid_dim.z);
printf("block dims: %u %u %u!\n", block_dim.x, block_dim.y, block_dim.z);
}
hipLaunchKernelGGL(( balancedTextureForwardConvolutionKernel22D), dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d, sector_centers_d, gi_host->sectorsToProcess);
}
}
else
{
    hipLaunchKernelGGL(balancedTextureForwardConvolutionKernel,
                       dim3(grid_dim), dim3(block_dim), shared_mem_size, 0,
                       data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d,
                       sector_centers_d, gi_host->sectorsToProcess);
}
}
#endif
| 08ee6d0de02cdb3474c8e51a0f7d4114cb67e959.cu | #ifndef TEXTURE_GPUNUFFT_KERNELS_H
#define TEXTURE_GPUNUFFT_KERNELS_H
#include "gpuNUFFT_kernels.hpp"
#include "../std_gpuNUFFT_kernels.cu"
#include "cuda_utils.cuh"
// ----------------------------------------------------------------------------
// convolutionKernel: NUFFT^H kernel
//
// Performs the gpuNUFFT step by convolution of sample points with
// interpolation function and resampling onto grid. Basic concept based on Zwart
// et al.
//
// parameters:
// * data : complex input sample points
// * crds : coordinates of data points (x,y,z)
// * gdata : output grid data
// * sectors : mapping of sample indices according to each sector
// * sector_centers : coordinates (x,y,z) of sector centers
// * N : number of sectors to process
__device__ void textureConvolutionFunction(int *sec, int sec_max,
int sec_offset, DType2 *sdata,
DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers)
{
// start convolution
int ind, x, y, z;
int imin, imax, jmin, jmax, kmin, kmax;
DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz;
__shared__ IndType3 center;
center.x = sector_centers[sec[threadIdx.x] * 3];
center.y = sector_centers[sec[threadIdx.x] * 3 + 1];
center.z = sector_centers[sec[threadIdx.x] * 3 + 2];
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
// loop over all data points of the current sector, and check if grid position
// lies inside
// affected region, if so, add data point weighted to grid position value
while (data_cnt < sec_max)
{
DType3 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
data_point.z = crds[data_cnt + 2 * GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
kz = mapKSpaceToGrid(data_point.z, GI.gridDims.z, center.z,
GI.sector_offset);
set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius);
// grid this point onto its cartesian points neighbors
for (int k = kmin; k <= kmax; k++)
{
kz = mapGridToKSpace(k, GI.gridDims.z, center.z, GI.sector_offset);
dz_sqr = (kz - data_point.z) * GI.aniso_z_scale;
dz_sqr *= dz_sqr;
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv,
dz_sqr * GI.radiusSquared_inv);
ind = getIndex(i, j, k, GI.sector_pad_width);
// multiply data by current kernel val
// grid complex or scalar
atomicAdd(&(sdata[ind].x),
val *
tex1Dfetch(texDATA, data_cnt).x);
atomicAdd(&(sdata[ind].y),
val *
tex1Dfetch(texDATA, data_cnt).y);
} // x
} // y
} // z
data_cnt = data_cnt + blockDim.x;
} // grid points per sector
// write shared data to output grid
__syncthreads();
// int sector_ind_offset = sec * GI.sector_dim;
__shared__ int sector_ind_offset;
sector_ind_offset =
computeXYZ2Lin(center.x - GI.sector_offset, center.y - GI.sector_offset,
center.z - GI.sector_offset, GI.gridDims);
// each thread writes one position from shared mem to global mem
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
getCoordsFromIndex(s_ind, &x, &y, &z, GI.sector_pad_width);
if (isOutlier(x, y, z, center.x, center.y, center.z, GI.gridDims,
GI.sector_offset))
// calculate opposite index
ind = computeXYZ2Lin(
calculateOppositeIndex(x, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(y, center.y, GI.gridDims.y, GI.sector_offset),
calculateOppositeIndex(z, center.z, GI.gridDims.z, GI.sector_offset),
GI.gridDims);
else
ind = sector_ind_offset +
computeXYZ2Lin(x, y, z, GI.gridDims); // index in output grid
atomicAdd(&(gdata[ind].x), sdata[s_ind].x); // Re
atomicAdd(&(gdata[ind].y), sdata[s_ind].y); // Im
// reset shared mem
sdata[s_ind].x = (DType)0.0;
sdata[s_ind].y = (DType)0.0;
}
__syncthreads();
}
__global__ void textureConvolutionKernel(DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
sdata[s_ind].x = (DType)0.0; // Re
sdata[s_ind].y = (DType)0.0; // Im
}
__syncthreads();
__shared__ int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
while (sec[threadIdx.x] < N)
{
__shared__ int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureConvolutionFunction(sec, data_max, 0, sdata, data, crds, gdata,
sectors, sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sec < sector_count
}
__global__ void balancedTextureConvolutionKernel(
DType2 *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
sdata[s_ind].x = (DType)0.0; // Re
sdata[s_ind].y = (DType)0.0; // Im
}
__syncthreads();
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureConvolutionFunction(sec, data_max,
sector_processing_order[sec_cnt].y, sdata, data,
crds, gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sec < sector_count
}
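// ----------------------------------------------------------------------------
// Illustration only -- not part of gpuNUFFT. The balanced kernels in this file
// consume a host-built sector_processing_order in which .x is a sector index
// and .y the offset of a chunk inside that sector; data_max then clamps every
// chunk to at most MAXIMUM_PAYLOAD samples. The sketch below shows one
// plausible way such an order could be assembled on the host. The function
// name and the splitting policy are assumptions, not the library's actual
// implementation (which lives elsewhere in the code base).
static unsigned buildSectorProcessingOrderSketch(const unsigned *sectorStarts,
                                                 unsigned sectorCount,
                                                 unsigned maxPayload,
                                                 uint2 *order /* caller provides capacity */)
{
  unsigned n = 0;
  for (unsigned s = 0; s < sectorCount; ++s)
  {
    unsigned count = sectorStarts[s + 1] - sectorStarts[s];
    for (unsigned off = 0; off < count; off += maxPayload)
    {
      order[n].x = s;   // read by the kernels as sector_processing_order[..].x
      order[n].y = off; // read by the kernels as sector_processing_order[..].y
      ++n;
    }
  }
  return n; // number of chunks, i.e. the N handed to the balanced kernels
}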
// ----------------------------------------------------------------------------
// convolutionKernel: NUFFT^H kernel
//
// Performs the gpuNUFFT step by convolution of sample points with
// interpolation function and resampling onto grid. Basic concept based on Zwart
// et al.
//
// parameters:
// * data : complex input sample points
// * crds : coordinates of data points (x,y)
// * gdata : output grid data
// * sectors : mapping of sample indices according to each sector
// * sector_centers : coordinates (x,y) of sector centers
// * N : number of sectors to process
__device__ void textureConvolutionFunction2D(DType2 *sdata, int *sec,
int sec_max, int sec_offset,
DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers)
{
// start convolution
int ind, x, y;
int imin, imax, jmin, jmax;
DType dx_sqr, dy_sqr, val, ix, jy;
__shared__ IndType2 center;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
// loop over all data points of the current sector, and check if grid position
// lies inside
// affected region, if so, add data point weighted to grid position value
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// grid this point onto its cartesian points neighbors
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// Calculate Separable Filters
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
ind = getIndex2D(i, j, GI.sector_pad_width);
// multiply data by current kernel val
// grid complex or scalar
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
atomicAdd(&(sdata[ind + c * GI.sector_dim].x),
val * tex1Dfetch(texDATA, data_cnt + c * GI.data_count).x);
atomicAdd(&(sdata[ind + c * GI.sector_dim].y),
val * tex1Dfetch(texDATA, data_cnt + c * GI.data_count).y);
}
} // x
} // y
data_cnt = data_cnt + blockDim.x;
} // grid points per sector
// write shared data to output grid
__syncthreads();
// int sector_ind_offset = sec * GI.sector_dim;
__shared__ int sector_ind_offset;
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
// each thread writes one position from shared mem to global mem
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
getCoordsFromIndex2D(s_ind, &x, &y, GI.sector_pad_width);
if (isOutlier2D(x, y, center.x, center.y, GI.gridDims, GI.sector_offset))
// calculate opposite index
ind = computeXY2Lin(
calculateOppositeIndex(x, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(y, center.y, GI.gridDims.y, GI.sector_offset),
GI.gridDims);
else
ind = sector_ind_offset +
computeXY2Lin(x, y, GI.gridDims); // index in output grid
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
atomicAdd(&(gdata[ind + c * GI.gridDims_count].x),
sdata[s_ind + c * GI.sector_dim].x); // Re
atomicAdd(&(gdata[ind + c * GI.gridDims_count].y),
sdata[s_ind + c * GI.sector_dim].y); // Im
// reset shared mem
sdata[s_ind + c * GI.sector_dim].x = (DType)0.0;
sdata[s_ind + c * GI.sector_dim].y = (DType)0.0;
}
}
}
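// ----------------------------------------------------------------------------
// Illustration only -- not gpuNUFFT code. Padded sector coordinates can reach
// outside the grid; isOutlier2D/calculateOppositeIndex above fold such cells
// back to the opposite edge, so the gridding effectively wraps periodically in
// k-space. The simplified analog below conveys the idea; the library's own
// index math additionally accounts for the sector offset, so treat this as
// intuition only, not a drop-in replacement.
static inline __host__ __device__ int wrapIndexPeriodicSketch(int i, int dim)
{
  // e.g. dim = 128: i = -2 maps to 126, i = 129 maps to 1
  return ((i % dim) + dim) % dim;
}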
__global__ void textureConvolutionKernel2D(DType2 *data, DType *crds,
CufftType *gdata, IndType *sectors,
IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
sdata[s_ind + c * GI.sector_dim].x = 0.0f; // Re
sdata[s_ind + c * GI.sector_dim].y = 0.0f; // Im
}
}
__syncthreads();
__shared__ int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
while (sec[threadIdx.x] < N)
{
__shared__ int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureConvolutionFunction2D(sdata, sec, data_max, 0, data, crds, gdata,
sectors, sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sec < sector_count
}
__global__ void balancedTextureConvolutionKernel2D(
DType2 *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ DType2 sdata[]; // externally managed shared memory
// init shared memory
for (int s_ind = threadIdx.x; s_ind < GI.sector_dim; s_ind += blockDim.x)
{
for (int c = threadIdx.z; c < GI.n_coils_cc; c += blockDim.z)
{
sdata[s_ind + c * GI.sector_dim].x = 0.0f; // Re
sdata[s_ind + c * GI.sector_dim].y = 0.0f; // Im
}
}
__syncthreads();
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]]
+ sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureConvolutionFunction2D(sdata, sec, data_max,
sector_processing_order[sec_cnt].y, data, crds,
gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sec < sector_count
}
void performTextureConvolution(DType2 *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d, IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
long shared_mem_size =
(gi_host->sector_dim) * sizeof(DType2) * gi_host->n_coils_cc;
int thread_size = THREAD_BLOCK_SIZE;
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, 1));
if (DEBUG)
{
printf("adjoint texture convolution requires %ld bytes of shared memory!\n",
shared_mem_size);
printf("grid dim %u, block dim %u \n", grid_dim.x, block_dim.x);
}
if (gi_host->is2Dprocessing)
{
dim3 block_dim(
64, 1,
DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 4 : gi_host->n_coils_cc));
textureConvolutionKernel2D <<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
gi_host->sector_count);
}
else
textureConvolutionKernel <<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
gi_host->sector_count);
if (DEBUG)
printf("...finished with: %s\n", cudaGetErrorString(cudaGetLastError()));
}
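// ----------------------------------------------------------------------------
// Illustration only -- not part of gpuNUFFT. The adjoint wrappers here reserve
// sector_dim * n_coils_cc complex entries of dynamic shared memory per block.
// The helper below makes that budget explicit, assuming DType2 is a pair of
// floats and that sector_dim equals the padded sector width raised to the
// number of dimensions: a padded 12^3 sector already needs about 13.5 KB per
// coil component, which is why n_coils_cc has to stay small on 48 KB devices.
static size_t adjointSharedMemBytesSketch(size_t sectorPadWidth, int dims,
                                          int nCoilsCc)
{
  size_t sectorDim = 1;
  for (int d = 0; d < dims; ++d)
    sectorDim *= sectorPadWidth;
  return sectorDim * static_cast<size_t>(nCoilsCc) * 2u * sizeof(float);
}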
void performTextureConvolution(DType2 *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d,
IndType2 *sector_processing_order_d,
IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
long shared_mem_size =
(gi_host->sector_dim) * sizeof(DType2) * gi_host->n_coils_cc;
int thread_size = THREAD_BLOCK_SIZE;
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, 1));
if (DEBUG)
{
printf("adjoint balanced texture convolution requires %ld bytes of shared "
"memory!\n",
shared_mem_size);
printf("grid dim %u, block dim %u \n", grid_dim.x, block_dim.x);
}
if (gi_host->is2Dprocessing)
{
dim3 block_dim(
64, 1,
DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 4 : gi_host->n_coils_cc));
//printf("block dims: %u %u %u!\n", block_dim.x, block_dim.y, block_dim.z);
balancedTextureConvolutionKernel2D
<<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d,
sector_centers_d, gi_host->sectorsToProcess);
}
else
balancedTextureConvolutionKernel <<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d,
sector_centers_d, gi_host->sectorsToProcess);
if (DEBUG)
printf("...finished with: %s\n", cudaGetErrorString(cudaGetLastError()));
}
// ----------------------------------------------------------------------------
// forwardConvolutionKernel: NUFFT kernel
//
// Performs the inverse gpuNUFFT step by convolution of grid points with
// interpolation function and resampling onto trajectory.
//
// parameters:
// * data : complex output sample points
// * crds : coordinates of data points (x,y,z)
// * gdata : input grid data
// * sectors : mapping of sample indices according to each sector
// * sector_centers : coordinates (x,y,z) of sector centers
// * N : number of sectors to process
__device__ void
textureForwardConvolutionFunction(long int *sec, long int sec_max, long int sec_offset,
DType2 *sdata, CufftType *gdata_cache,
DType2 *data, DType *crds, CufftType *gdata,
IndType *sectors, IndType *sector_centers)
{
int ind, imin, imax, jmin, jmax, kmin, kmax, ii, jj, kk;
DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz;
__shared__ IndType3 center;
center.x = sector_centers[sec[threadIdx.x] * 3];
center.y = sector_centers[sec[threadIdx.x] * 3 + 1];
center.z = sector_centers[sec[threadIdx.x] * 3 + 2];
__shared__ long int sector_ind_offset;
sector_ind_offset =
computeXYZ2Lin(center.x - GI.sector_offset, center.y - GI.sector_offset,
center.z - GI.sector_offset, GI.gridDims);
// init sector cache
// preload sector grid data into cache
for (long int ind = threadIdx.x; ind < GI.sector_dim; ind += blockDim.x)
{
long int grid_index;
getCoordsFromIndex(ind, &ii, &jj, &kk, GI.sector_pad_width);
if (isOutlier(ii, jj, kk, center.x, center.y, center.z, GI.gridDims,
GI.sector_offset))
// calculate opposite index
grid_index = computeXYZ2Lin(
calculateOppositeIndex(ii, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(jj, center.y, GI.gridDims.y, GI.sector_offset),
calculateOppositeIndex(kk, center.z, GI.gridDims.z, GI.sector_offset),
GI.gridDims);
else
grid_index = (sector_ind_offset + computeXYZ2Lin(ii, jj, kk, GI.gridDims));
gdata_cache[ind].x = tex1Dfetch(texGDATA, grid_index).x;
gdata_cache[ind].y = tex1Dfetch(texGDATA, grid_index).y;
}
__syncthreads();
// Grid Points over Threads
long int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
while (data_cnt < sec_max)
{
DType3 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
data_point.z = crds[data_cnt + 2 * GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
kz = mapKSpaceToGrid(data_point.z, GI.gridDims.z, center.z,
GI.sector_offset);
set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
for (int k = kmin; k <= kmax; k++)
{
kz = mapGridToKSpace(k, GI.gridDims.z, center.z, GI.sector_offset);
dz_sqr = (kz - data_point.z) * GI.aniso_z_scale;
dz_sqr *= dz_sqr;
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv,
dz_sqr * GI.radiusSquared_inv);
ind = getIndex(i, j, k, GI.sector_pad_width);
sdata[threadIdx.x].x += gdata_cache[ind].x * val;
sdata[threadIdx.x].y += gdata_cache[ind].y * val;
} // x loop
} // y loop
} // z loop
atomicAdd(&(data[data_cnt].x), sdata[threadIdx.x].x);
atomicAdd(&(data[data_cnt].y), sdata[threadIdx.x].y);
data_cnt = data_cnt + blockDim.x;
sdata[threadIdx.x].x = (DType)0.0; // Re
sdata[threadIdx.x].y = (DType)0.0; // Im
} // data points per sector
}
__global__ void textureForwardConvolutionKernel(CufftType *data, DType *crds,
CufftType *gdata,
IndType *sectors,
IndType *sector_centers, int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x];
__shared__ long int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
// init shared memory
shared_out_data[threadIdx.x].x = (DType)0.0; // Re
shared_out_data[threadIdx.x].y = (DType)0.0; // Im
__syncthreads();
// start convolution
while (sec[threadIdx.x] < N)
{
__shared__ long int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureForwardConvolutionFunction(sec, data_max, 0, shared_out_data,
gdata_cache, data, crds, gdata, sectors,
sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x];
long int sec_cnt = blockIdx.x;
__shared__ long int sec[THREAD_BLOCK_SIZE];
// init shared memory
shared_out_data[threadIdx.x].x = (DType)0.0; // Re
shared_out_data[threadIdx.x].y = (DType)0.0; // Im
__syncthreads();
// start convolution
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ long int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction(
sec, data_max, sector_processing_order[sec_cnt].y, shared_out_data,
gdata_cache, data, crds, gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sector check
}
__device__ void
textureForwardConvolutionFunction2D(int *sec, int sec_max, int sec_offset,
DType2 *sdata, CufftType *gdata_cache,
DType2 *data, DType *crds, CufftType *gdata,
IndType *sectors, IndType *sector_centers)
{
int ind, imin, imax, jmin, jmax, ii, jj;
DType val, ix, jy;
__shared__ IndType2 center;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
__shared__ int sector_ind_offset;
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
// init sector cache
// preload sector grid data into cache
for (int ind = threadIdx.x; ind < GI.sector_dim; ind += blockDim.x)
{
int grid_index;
getCoordsFromIndex2D(ind, &ii, &jj, GI.sector_pad_width);
// multiply data by current kernel val
// grid complex or scalar
if (isOutlier2D(ii, jj, center.x, center.y, GI.gridDims, GI.sector_offset))
// calculate opposite index
grid_index = getIndex2D(
calculateOppositeIndex(ii, center.x, GI.gridDims.x, GI.sector_offset),
calculateOppositeIndex(jj, center.y, GI.gridDims.y, GI.sector_offset),
GI.gridDims.x);
else
grid_index = (sector_ind_offset + getIndex2D(ii, jj, GI.gridDims.x));
for (int c = 0; c < GI.n_coils_cc; c++)
{
gdata_cache[ind + c * GI.sector_dim].x =
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).x;
gdata_cache[ind + c * GI.sector_dim].y =
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).y;
}
}
__syncthreads();
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
for (int j = jmin; j <= jmax; j++)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
DType dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
for (int i = imin; i <= imax; i++)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
DType dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// calc as separable filter
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
ind = getIndex2D(i, j, GI.sector_pad_width);
for (int c = 0; c < GI.n_coils_cc; c++)
{
sdata[threadIdx.x + c * blockDim.x].x +=
gdata_cache[ind + c * GI.sector_dim].x * val;
sdata[threadIdx.x + c * blockDim.x].y +=
gdata_cache[ind + c * GI.sector_dim].y * val;
}
} // x loop
} // y loop
for (int c = 0; c < GI.n_coils_cc; c++)
{
atomicAdd(&(data[data_cnt + c * GI.data_count].x),
sdata[threadIdx.x + c * blockDim.x].x);
atomicAdd(&(data[data_cnt + c * GI.data_count].y),
sdata[threadIdx.x + c * blockDim.x].y);
sdata[threadIdx.x + c * blockDim.x].x = (DType)0.0; // Re
sdata[threadIdx.x + c * blockDim.x].y = (DType)0.0; // Im
}
data_cnt = data_cnt + blockDim.x;
} // data points per sector
}
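// ----------------------------------------------------------------------------
// Illustration only -- not used by gpuNUFFT. The forward 2D function above
// keeps one partial sum per (thread, coil component) in shared memory, stored
// coil-major: thread t's accumulator for coil c sits at t + c * blockDim.x.
// The helper simply spells out that indexing; with blockDim.x = 64 and
// n_coils_cc = 4 the accumulators occupy 256 CufftType entries, after which
// the per-sector grid cache begins (see gdata_cache in the kernels below).
__device__ __forceinline__ int coilMajorAccumIdxSketch(int tid, int coil,
                                                       int threadsPerBlock)
{
  return tid + coil * threadsPerBlock; // matches sdata[threadIdx.x + c * blockDim.x]
}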
__device__ void textureForwardConvolutionFunction22D(
int *sec, int sec_max, int sec_offset, DType2 *data,
DType *crds, CufftType *gdata, IndType *sectors, IndType *sector_centers)
{
int imin, imax, jmin, jmax, i, j;
DType val, ix, jy;
IndType2 center;
int sector_ind_offset;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
__syncthreads();
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
int rangeX = imax - imin + 1;
int rangeY = jmax - jmin + 1;
int idx = threadIdx.y;
int grid_index;
while (idx < (rangeX * rangeY))
{
getCoordsFromIndex2D(idx, &i, &j, rangeX, rangeY);
i += imin;
j += jmin;
if (j <= jmax && j >= jmin)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
DType dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
if (i <= imax && i >= imin)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
DType dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// calc as separable filter
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
if (isOutlier2D(i, j, center.x, center.y, GI.gridDims,
GI.sector_offset))
// calculate opposite index
grid_index =
getIndex2D(calculateOppositeIndex(i, center.x, GI.gridDims.x,
GI.sector_offset),
calculateOppositeIndex(j, center.y, GI.gridDims.y,
GI.sector_offset),
GI.gridDims.x);
else
grid_index = (sector_ind_offset + getIndex2D(i, j, GI.gridDims.x));
for (int c = 0; c < GI.n_coils_cc; c++)
{
atomicAdd(&(data[data_cnt + c * GI.data_count].x), tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).x * val);
atomicAdd(&(data[data_cnt + c * GI.data_count].y), tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).y * val);
}
} // x if
} // y if
idx = idx + blockDim.y;
}
data_cnt = data_cnt + blockDim.x;
} // data points per sector
}
__device__ void textureForwardConvolutionFunction32D(
int *sec, int sec_max, int sec_offset, DType *cache, DType2 *data,
DType *crds, CufftType *gdata, IndType *sectors, IndType *sector_centers)
{
int imin, imax, jmin, jmax, i, j;
DType val, ix, jy;
__shared__ IndType2 center;
center.x = sector_centers[sec[threadIdx.x] * 2];
center.y = sector_centers[sec[threadIdx.x] * 2 + 1];
__shared__ int sector_ind_offset;
sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,
center.y - GI.sector_offset, GI.gridDims);
int grid_index;
// Grid Points over Threads
int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset;
while (data_cnt < sec_max)
{
DType2 data_point; // datapoint per thread
data_point.x = crds[data_cnt];
data_point.y = crds[data_cnt + GI.data_count];
// set the boundaries of final dataset for gpuNUFFT this point
ix = mapKSpaceToGrid(data_point.x, GI.gridDims.x, center.x,
GI.sector_offset);
set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius);
jy = mapKSpaceToGrid(data_point.y, GI.gridDims.y, center.y,
GI.sector_offset);
set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius);
// convolve neighboring cartesian points to this data point
int idx = threadIdx.y;
getCoordsFromIndex2D(idx, &i, &j, GI.kernel_width + 1, GI.kernel_width + 1);
i += imin;
j += jmin;
if (j <= jmax && j >= jmin)
{
jy = mapGridToKSpace(j, GI.gridDims.y, center.y, GI.sector_offset);
DType dy_sqr = (jy - data_point.y) * GI.aniso_y_scale;
dy_sqr *= dy_sqr;
if (i <= imax && i >= imin)
{
ix = mapGridToKSpace(i, GI.gridDims.x, center.x, GI.sector_offset);
DType dx_sqr = (ix - data_point.x) * GI.aniso_x_scale;
dx_sqr *= dx_sqr;
// get kernel value
// calc as separable filter
val = computeTextureLookup(dx_sqr * GI.radiusSquared_inv,
dy_sqr * GI.radiusSquared_inv);
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] = val;
if (isOutlier2D(i, j, center.x, center.y, GI.gridDims,
GI.sector_offset))
// calculate opposite index
grid_index =
getIndex2D(calculateOppositeIndex(i, center.x, GI.gridDims.x,
GI.sector_offset),
calculateOppositeIndex(j, center.y, GI.gridDims.y,
GI.sector_offset),
GI.gridDims.x);
else
grid_index = (sector_ind_offset + getIndex2D(i, j, GI.gridDims.x));
for (int c = 0; c < GI.n_coils_cc; c++)
{
atomicAdd(
&(data[data_cnt + c * GI.data_count].x),
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] *
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).x);
atomicAdd(
&(data[data_cnt + c * GI.data_count].y),
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] *
tex1Dfetch(texGDATA, grid_index + c * GI.gridDims_count).y);
}
} // x if
} // y if
cache[GI.kernel_widthSquared * threadIdx.x + threadIdx.y] = 0;
data_cnt = data_cnt + blockDim.x;
} // data points per sector
}
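// ----------------------------------------------------------------------------
// Illustration only -- not the library's getCoordsFromIndex2D. In the two
// per-window variants above, each thread column (threadIdx.y) handles one cell
// of the kernel footprint around a sample, so a flat window index has to be
// unpacked into (i, j) offsets. The sketch below shows the general idea; which
// axis varies fastest is an assumption here, so rely on the real helper for
// the exact ordering.
static inline __host__ __device__ void unpackWindowIndexSketch(int idx,
                                                               int rangeX,
                                                               int *i, int *j)
{
  *i = idx % rangeX; // assumed fast-varying offset inside the window
  *j = idx / rangeX; // assumed slow-varying offset
}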
__global__ void textureForwardConvolutionKernel2D(CufftType *data, DType *crds,
CufftType *gdata,
IndType *sectors,
IndType *sector_centers,
int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x * GI.n_coils_cc];
__shared__ int sec[THREAD_BLOCK_SIZE];
sec[threadIdx.x] = blockIdx.x;
// init shared memory
for (int c = 0; c < GI.n_coils_cc; c++)
{
shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f; // Re
shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f; // Im
}
__syncthreads();
// start convolution
while (sec[threadIdx.x] < N)
{
__shared__ int data_max;
data_max = sectors[sec[threadIdx.x] + 1];
textureForwardConvolutionFunction2D(sec, data_max, 0, shared_out_data,
gdata_cache, data, crds, gdata, sectors,
sector_centers);
__syncthreads();
sec[threadIdx.x] = sec[threadIdx.x] + gridDim.x;
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel2D(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ CufftType shared[]; // externally managed shared memory
CufftType *shared_out_data = (CufftType *)&shared[0];
CufftType *gdata_cache = (CufftType *)&shared[blockDim.x * GI.n_coils_cc];
__shared__ int sec[THREAD_BLOCK_SIZE];
// init shared memory
for (int c = 0; c < GI.n_coils_cc; c++)
{
shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f; // Re
shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f; // Im
}
__syncthreads();
// start convolution
for (int sec_cnt = blockIdx.x; sec_cnt < N; sec_cnt += gridDim.x)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction2D(
sec, data_max, sector_processing_order[sec_cnt].y, shared_out_data,
gdata_cache, data, crds, gdata, sectors, sector_centers);
__syncthreads();
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel22D(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
// init shared memory
// start convolution
while (sec_cnt < N)
{
int data_max;
if (threadIdx.y == 0)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
}
__syncthreads();
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]]
+ sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction22D(
sec, data_max, sector_processing_order[sec_cnt].y, data, crds,
gdata, sectors, sector_centers);
sec_cnt = sec_cnt + gridDim.x;
__syncthreads();
} // sector check
}
__global__ void balancedTextureForwardConvolutionKernel32D(
CufftType *data, DType *crds, CufftType *gdata, IndType *sectors,
IndType2 *sector_processing_order, IndType *sector_centers, int N)
{
extern __shared__ DType shared_cache[]; // externally managed shared memory
DType *cache = (DType *)&shared_cache[0];
int sec_cnt = blockIdx.x;
__shared__ int sec[THREAD_BLOCK_SIZE];
// init shared memory
cache[threadIdx.x * blockDim.y + threadIdx.y] = (DType)0.0;
__syncthreads();
// start convolution
while (sec_cnt < N)
{
sec[threadIdx.x] = sector_processing_order[sec_cnt].x;
__shared__ int data_max;
data_max = min(sectors[sec[threadIdx.x] + 1],
sectors[sec[threadIdx.x]] +
sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD);
textureForwardConvolutionFunction32D(
sec, data_max, sector_processing_order[sec_cnt].y, cache, data, crds,
gdata, sectors, sector_centers);
__syncthreads();
sec_cnt = sec_cnt + gridDim.x;
} // sector check
}
void performTextureForwardConvolution(CufftType *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d,
IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
int thread_size = 192;
long shared_mem_size = (thread_size + gi_host->sector_dim) *
gi_host->n_coils_cc * sizeof(CufftType);
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, thread_size));
if (DEBUG)
printf("texture forward convolution requires %ld bytes of shared memory!\n",
shared_mem_size);
if (gi_host->is2Dprocessing)
{
// dim3 block_dim(thread_size, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 1
// : gi_host->n_coils_cc));
dim3 block_dim(thread_size, 1, 1); // DEFAULT_VALUE(gi_host->n_coils_cc > 4
// ? 1 : gi_host->n_coils_cc));
textureForwardConvolutionKernel2D
<<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
gi_host->sector_count);
}
else
textureForwardConvolutionKernel <<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_centers_d,
gi_host->sector_count);
}
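// ----------------------------------------------------------------------------
// Usage note (sketch only). The texture-based forward kernels read the grid
// exclusively through tex1Dfetch(texGDATA, ...), and the adjoint kernels read
// the samples through texDATA, so the respective buffers must be bound to
// those texture references before the wrappers are called. A minimal binding
// could look like the commented lines below; the library wraps this in its own
// helpers and the channel descriptors live with the texture declarations, so
// treat everything here as an assumption-laden reminder rather than the actual
// call sequence.
//
// cudaBindTexture(0, texGDATA, gdata_d,
//                 gi_host->gridDims_count * gi_host->n_coils_cc * sizeof(CufftType));
// performTextureForwardConvolution(data_d, crds_d, gdata_d, kernel_d,
//                                  sectors_d, sector_centers_d, gi_host);
// cudaUnbindTexture(texGDATA);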
void performTextureForwardConvolution(CufftType *data_d, DType *crds_d,
CufftType *gdata_d, DType *kernel_d,
IndType *sectors_d,
IndType2 *sector_processing_order_d,
IndType *sector_centers_d,
gpuNUFFT::GpuNUFFTInfo *gi_host)
{
int thread_size = THREAD_BLOCK_SIZE;
long shared_mem_size = (thread_size + gi_host->sector_dim) *
gi_host->n_coils_cc * sizeof(CufftType);
dim3 block_dim(thread_size);
dim3 grid_dim(getOptimalGridDim(gi_host->sector_count, thread_size));
if (DEBUG)
printf("balanced texture forward convolution requires %ld bytes of shared "
"memory!\n",
shared_mem_size);
if (gi_host->is2Dprocessing)
{
bool useV2cached = false;
if (useV2cached)
{
int thread_size = 32;
int threadY = (gi_host->kernel_width + 1) * (gi_host->kernel_width + 1);
long shared_mem_size =
(threadY * thread_size) * sizeof(DType);
grid_dim = dim3(getOptimalGridDim(gi_host->sector_count, 1));
block_dim = getOptimal2DBlockDim(thread_size, threadY);
if (DEBUG)
{
printf("balanced texture forward convolution 2 (2d) requires %ld bytes "
"of shared memory!\n",
shared_mem_size);
printf("block dims: %u %u %u!\n", block_dim.x, block_dim.y, block_dim.z);
printf("grid dims: %u %u %u!\n", grid_dim.x, grid_dim.y, grid_dim.z);
}
balancedTextureForwardConvolutionKernel32D<<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d, sector_centers_d, gi_host->sectorsToProcess);
}
else
{
int thread_size = 32;
long shared_mem_size =
(gi_host->kernel_widthSquared * thread_size) * sizeof(DType);
grid_dim = dim3(getOptimalGridDim(gi_host->sector_count, 1));
//TODO maybe it's better to round kwSqrd to the next multiple of 2
block_dim = getOptimal2DBlockDim(thread_size, gi_host->kernel_widthSquared);
if (DEBUG)
{
printf("balanced texture forward convolution 2 (2d) requires %ld bytes "
"of shared memory!\n",
shared_mem_size);
printf("grid dims: %u %u %u!\n", grid_dim.x, grid_dim.y, grid_dim.z);
printf("block dims: %u %u %u!\n", block_dim.x, block_dim.y, block_dim.z);
}
balancedTextureForwardConvolutionKernel22D<<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d, sector_centers_d, gi_host->sectorsToProcess);
}
}
else
{
balancedTextureForwardConvolutionKernel
<<<grid_dim, block_dim, shared_mem_size>>>
(data_d, crds_d, gdata_d, sectors_d, sector_processing_order_d,
sector_centers_d, gi_host->sectorsToProcess);
}
}
#endif
|
0e26d56a75c8adf5c1c2338cf4e8399768a42222.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copy_kernel_frombuf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
char *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
int rx_s = 1;
int rx_e = 1;
int ry_s = 1;
int ry_e = 1;
int rz_s = 1;
int rz_e = 1;
int x_step = 1;
int y_step = 1;
int z_step = 1;
int size_x = XSIZE*YSIZE;
int size_y = XSIZE*YSIZE;
int size_z = XSIZE*YSIZE;
int buf_strides_x = 2;
int buf_strides_y = 2;
int buf_strides_z = 2;
int type_size = XSIZE*YSIZE;
int dim = 2;
int OPS_soa = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(copy_kernel_frombuf, dim3(gridBlock), dim3(threadBlock), 0, 0, dest,src,rx_s,rx_e,ry_s,ry_e,rz_s,rz_e,x_step,y_step,z_step,size_x,size_y,size_z,buf_strides_x,buf_strides_y,buf_strides_z,type_size,dim,OPS_soa);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(copy_kernel_frombuf, dim3(gridBlock), dim3(threadBlock), 0, 0, dest,src,rx_s,rx_e,ry_s,ry_e,rz_s,rz_e,x_step,y_step,z_step,size_x,size_y,size_z,buf_strides_x,buf_strides_y,buf_strides_z,type_size,dim,OPS_soa);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(copy_kernel_frombuf, dim3(gridBlock), dim3(threadBlock), 0, 0, dest,src,rx_s,rx_e,ry_s,ry_e,rz_s,rz_e,x_step,y_step,z_step,size_x,size_y,size_z,buf_strides_x,buf_strides_y,buf_strides_z,type_size,dim,OPS_soa);
}
hipDeviceSynchronize(); // wait for the queued launches so the timing covers execution, not just enqueue
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0e26d56a75c8adf5c1c2338cf4e8399768a42222.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copy_kernel_frombuf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dest = NULL;
cudaMalloc(&dest, XSIZE*YSIZE);
char *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
int rx_s = 1;
int rx_e = 1;
int ry_s = 1;
int ry_e = 1;
int rz_s = 1;
int rz_e = 1;
int x_step = 1;
int y_step = 1;
int z_step = 1;
int size_x = XSIZE*YSIZE;
int size_y = XSIZE*YSIZE;
int size_z = XSIZE*YSIZE;
int buf_strides_x = 2;
int buf_strides_y = 2;
int buf_strides_z = 2;
int type_size = XSIZE*YSIZE;
int dim = 2;
int OPS_soa = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
copy_kernel_frombuf<<<gridBlock,threadBlock>>>(dest,src,rx_s,rx_e,ry_s,ry_e,rz_s,rz_e,x_step,y_step,z_step,size_x,size_y,size_z,buf_strides_x,buf_strides_y,buf_strides_z,type_size,dim,OPS_soa);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copy_kernel_frombuf<<<gridBlock,threadBlock>>>(dest,src,rx_s,rx_e,ry_s,ry_e,rz_s,rz_e,x_step,y_step,z_step,size_x,size_y,size_z,buf_strides_x,buf_strides_y,buf_strides_z,type_size,dim,OPS_soa);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copy_kernel_frombuf<<<gridBlock,threadBlock>>>(dest,src,rx_s,rx_e,ry_s,ry_e,rz_s,rz_e,x_step,y_step,z_step,size_x,size_y,size_z,buf_strides_x,buf_strides_y,buf_strides_z,type_size,dim,OPS_soa);
}
cudaDeviceSynchronize(); // wait for the queued launches so the timing covers execution, not just enqueue
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b0a3e863a5743ca973ea8a1e8b63793b039a183b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision:$
// $Date:$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* debugging.cu
*
* @brief Debugging/statistics/performance utilities for hash tables.
*/
#include "debugging.h"
#include "definitions.h"
#include "cuHash64.cuh"
#include <algorithm>
#include "cuda_util.h"
namespace CudaHT
{
namespace CuckooHashing{
//! Debugging function: Takes statistics on the hash functions' distribution.
/*! Determines:
* - How many unique slots each key has.
* - How many keys hash into each slot.
* - Whether any keys failed to get a full set of slots.
*/
__global__ void take_hash_function_statistics_kernel(
const unsigned long long *keys,
const unsigned n_entries,
const unsigned table_size,
const uint2 *constants,
const unsigned num_functions,
unsigned *num_slots_available,
unsigned *num_hashing_in,
unsigned *failed)
{
unsigned thread_index = threadIdx.x + blockIdx.x * blockDim.x +
blockIdx.y * blockDim.x * gridDim.x;
if (thread_index >= n_entries)
return;
unsigned long long key = keys[thread_index];
// Determine all of the locations the key hashes into.
// Also count how many keys hash into each location.
unsigned locations[kMaxHashFunctions];
for (unsigned i = 0; i < num_functions; ++i) {
locations[i] =
hash_function_inner(constants[i], key) % table_size;
if (num_hashing_in != NULL) {
atomicAdd(num_hashing_in + locations[i], 1);
}
}
// Determine whether all of the locations were different.
unsigned num_slots = 1;
for (unsigned i = 1; i < num_functions; ++i) {
bool matched = false;
for (unsigned j = 0; j < i; ++j) {
if (locations[i] == locations[j]) {
matched = true;
break;
}
}
if (!matched) {
num_slots++;
}
}
if (num_slots_available != NULL) {
num_slots_available[thread_index] = num_slots;
}
if (failed != NULL && num_slots != num_functions) {
*failed = 1;
}
}
void TakeHashFunctionStatistics(const unsigned num_keys,
const unsigned long long *d_keys,
const unsigned table_size,
const uint2 *constants,
const unsigned kNumHashFunctions)
{
char buffer[16000];
PrintMessage("Hash function constants: ");
for (unsigned i = 0; i < kNumHashFunctions; ++i) {
sprintf(buffer, "\t%10u, %10u", constants[i].x, constants[i].y);
PrintMessage(buffer);
}
unsigned *d_num_hashing_in = NULL;
#ifdef COUNT_HOW_MANY_HASH_INTO_EACH_SLOT
CUDA_SAFE_CALL(hipMalloc((void **)&d_num_hashing_in,
sizeof(unsigned) * table_size));
CUDA_SAFE_CALL(
hipMemset(d_num_hashing_in, 0, sizeof(unsigned) * table_size));
#endif
unsigned *d_num_slots_available = NULL;
#ifdef COUNT_HOW_MANY_HAVE_CYCLES
CUDA_SAFE_CALL(hipMalloc((void **)&d_num_slots_available,
sizeof(unsigned) * num_keys));
#endif
uint2 *d_constants = NULL;
CUDA_SAFE_CALL(hipMalloc((void **)&d_constants,
sizeof(uint2) * kNumHashFunctions));
CUDA_SAFE_CALL(hipMemcpy(d_constants, constants,
sizeof(uint2) * kNumHashFunctions,
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( take_hash_function_statistics_kernel), dim3(ComputeGridDim(num_keys)),
dim3(kBlockSize), 0, 0,
d_keys, num_keys, table_size, d_constants, kNumHashFunctions,
d_num_slots_available, d_num_hashing_in, NULL);
CUDA_SAFE_CALL(hipFree(d_constants));
#ifdef COUNT_HOW_MANY_HASH_INTO_EACH_SLOT
unsigned *num_hashing_in = new unsigned[table_size];
CUDA_SAFE_CALL(hipMemcpy(num_hashing_in, d_num_hashing_in,
sizeof(unsigned) * table_size,
hipMemcpyDeviceToHost));
/*
// Print how many items hash into each slot.
// Used to make sure items are spread evenly throughout the table.
buffer[0] = '\0';
PrintMessage("Num hashing into each: ", true);
for (unsigned i = 0; i < table_size; ++i) {
sprintf(buffer, "%s\t%2u", buffer, num_hashing_in[i]);
if (i % 25 == 24) {
PrintMessage(buffer, true);
buffer[0] = '\0';
}
}
PrintMessage(buffer,true);
*/
// Print a histogram of how many items are hashed into each slot. Shows
// if average number of items hashing into each slot is low.
std::sort(num_hashing_in, num_hashing_in + table_size);
int count = 1;
unsigned previous = num_hashing_in[0];
sprintf(buffer, "Num items hashing into a slot:\t");
PrintMessage(buffer);
for (unsigned i = 1; i < table_size; ++i) {
if (num_hashing_in[i] != previous) {
sprintf(buffer, "\t(%u, %u)", previous, count);
PrintMessage(buffer);
previous = num_hashing_in[i];
count = 1;
} else {
count++;
}
}
sprintf(buffer, "\t(%u, %u)", previous, count);
PrintMessage(buffer);
delete[] num_hashing_in;
CUDA_SAFE_CALL(hipFree(d_num_hashing_in));
#endif
#ifdef COUNT_HOW_MANY_HAVE_CYCLES
unsigned *num_slots_available = new unsigned[num_keys];
CUDA_SAFE_CALL(hipMemcpy(num_slots_available, d_num_slots_available,
sizeof(unsigned) * num_keys,
hipMemcpyDeviceToHost));
static const unsigned kHistogramSize = kNumHashFunctions + 1;
unsigned *histogram = new unsigned[kHistogramSize];
memset(histogram, 0, sizeof(unsigned) * kHistogramSize);
for (unsigned i = 0; i < num_keys; ++i) {
histogram[num_slots_available[i]]++;
}
sprintf(buffer, "Slots assigned to each key: ");
for (unsigned i = 1; i < kHistogramSize; ++i) {
sprintf(buffer, "%s(%u, %u) ", buffer, i, histogram[i]);
}
PrintMessage(buffer);
delete[] histogram;
delete[] num_slots_available;
CUDA_SAFE_CALL(hipFree(d_num_slots_available));
#endif
}
bool CheckAssignedSameSlot(const unsigned N,
const unsigned num_keys,
const unsigned long long *d_keys,
const unsigned table_size,
uint2 *constants)
{
unsigned *d_cycle_exists = NULL;
uint2 *d_constants = NULL;
CUDA_SAFE_CALL(hipMalloc((void **)&d_cycle_exists, sizeof(unsigned)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_constants, sizeof(uint2) * N));
CUDA_SAFE_CALL(hipMemset(d_cycle_exists, 0, sizeof(unsigned)));
CUDA_SAFE_CALL(hipMemcpy(d_constants, constants, sizeof(uint2) * N,
hipMemcpyHostToDevice));
// Check if all keys were given a full set of N slots by the functions.
hipLaunchKernelGGL(( take_hash_function_statistics_kernel), dim3(ComputeGridDim(num_keys)),
dim3(kBlockSize), 0, 0,
d_keys, num_keys, table_size, d_constants, N, NULL, NULL,
d_cycle_exists);
unsigned cycle_exists;
CUDA_SAFE_CALL(hipMemcpy(&cycle_exists, d_cycle_exists,
sizeof(unsigned), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(d_cycle_exists));
CUDA_SAFE_CALL(hipFree(d_constants));
return (cycle_exists != 0);
}
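// ----------------------------------------------------------------------------
// Usage sketch -- not part of cuDPP. A typical caller re-draws the hash
// function constants until CheckAssignedSameSlot() reports that no key is
// handed the same slot by two different functions, since duplicate slots leave
// a key with fewer fallback positions during cuckoo eviction. The constant
// generator below is a hypothetical placeholder, not a library routine, and
// d_keys is assumed to already reside on the device.
//
// uint2 constants[kMaxHashFunctions];
// do {
//   GenerateRandomConstantsSomehow(constants, num_functions); // hypothetical
// } while (CheckAssignedSameSlot(num_functions, num_keys, d_keys,
//                                table_size, constants));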
void PrintStashContents(const Entry *d_stash)
{
Entry *stash = new Entry[CudaHT::CuckooHashing::kStashSize];
CUDA_SAFE_CALL(hipMemcpy(
stash, d_stash, sizeof(Entry) * CudaHT::CuckooHashing::kStashSize,
hipMemcpyDeviceToHost));
for (unsigned i = 0; i < CudaHT::CuckooHashing::kStashSize; ++i) {
if (get_key(stash[i]) != kKeyEmpty) {
char buffer[256];
sprintf(buffer, "Stash[%u]: %u = %u", i,
get_key(stash[i]), get_value(stash[i]));
PrintMessage(buffer, true);
}
}
delete[] stash;
}
}; // namespace CuckooHashing
}; // namespace CudaHT
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| b0a3e863a5743ca973ea8a1e8b63793b039a183b.cu | // -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision:$
// $Date:$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* debugging.cu
*
* @brief Debugging/statistics/performance utilities for hash tables.
*/
#include "debugging.h"
#include "definitions.h"
#include "cuHash64.cuh"
#include <algorithm>
#include "cuda_util.h"
namespace CudaHT
{
namespace CuckooHashing{
//! Debugging function: Takes statistics on the hash functions' distribution.
/*! Determines:
* - How many unique slots each key has.
* - How many keys hash into each slot.
* - Whether any keys failed to get a full set of slots.
*/
__global__ void take_hash_function_statistics_kernel(
const unsigned long long *keys,
const unsigned n_entries,
const unsigned table_size,
const uint2 *constants,
const unsigned num_functions,
unsigned *num_slots_available,
unsigned *num_hashing_in,
unsigned *failed)
{
unsigned thread_index = threadIdx.x + blockIdx.x * blockDim.x +
blockIdx.y * blockDim.x * gridDim.x;
if (thread_index >= n_entries)
return;
unsigned long long key = keys[thread_index];
// Determine all of the locations the key hashes into.
// Also count how many keys hash into each location.
unsigned locations[kMaxHashFunctions];
for (unsigned i = 0; i < num_functions; ++i) {
locations[i] =
hash_function_inner(constants[i], key) % table_size;
if (num_hashing_in != NULL) {
atomicAdd(num_hashing_in + locations[i], 1);
}
}
// Determine whether all of the locations were different.
unsigned num_slots = 1;
for (unsigned i = 1; i < num_functions; ++i) {
bool matched = false;
for (unsigned j = 0; j < i; ++j) {
if (locations[i] == locations[j]) {
matched = true;
break;
}
}
if (!matched) {
num_slots++;
}
}
if (num_slots_available != NULL) {
num_slots_available[thread_index] = num_slots;
}
if (failed != NULL && num_slots != num_functions) {
*failed = 1;
}
}
void TakeHashFunctionStatistics(const unsigned num_keys,
const unsigned long long *d_keys,
const unsigned table_size,
const uint2 *constants,
const unsigned kNumHashFunctions)
{
char buffer[16000];
PrintMessage("Hash function constants: ");
for (unsigned i = 0; i < kNumHashFunctions; ++i) {
sprintf(buffer, "\t%10u, %10u", constants[i].x, constants[i].y);
PrintMessage(buffer);
}
unsigned *d_num_hashing_in = NULL;
#ifdef COUNT_HOW_MANY_HASH_INTO_EACH_SLOT
CUDA_SAFE_CALL(cudaMalloc((void **)&d_num_hashing_in,
sizeof(unsigned) * table_size));
CUDA_SAFE_CALL(
cudaMemset(d_num_hashing_in, 0, sizeof(unsigned) * table_size));
#endif
unsigned *d_num_slots_available = NULL;
#ifdef COUNT_HOW_MANY_HAVE_CYCLES
CUDA_SAFE_CALL(cudaMalloc((void **)&d_num_slots_available,
sizeof(unsigned) * num_keys));
#endif
uint2 *d_constants = NULL;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_constants,
sizeof(uint2) * kNumHashFunctions));
CUDA_SAFE_CALL(cudaMemcpy(d_constants, constants,
sizeof(uint2) * kNumHashFunctions,
cudaMemcpyHostToDevice));
take_hash_function_statistics_kernel<<<ComputeGridDim(num_keys),
kBlockSize>>>(
d_keys, num_keys, table_size, d_constants, kNumHashFunctions,
d_num_slots_available, d_num_hashing_in, NULL);
CUDA_SAFE_CALL(cudaFree(d_constants));
#ifdef COUNT_HOW_MANY_HASH_INTO_EACH_SLOT
unsigned *num_hashing_in = new unsigned[table_size];
CUDA_SAFE_CALL(cudaMemcpy(num_hashing_in, d_num_hashing_in,
sizeof(unsigned) * table_size,
cudaMemcpyDeviceToHost));
/*
// Print how many items hash into each slot.
// Used to make sure items are spread evenly throughout the table.
buffer[0] = '\0';
PrintMessage("Num hashing into each: ", true);
for (unsigned i = 0; i < table_size; ++i) {
sprintf(buffer, "%s\t%2u", buffer, num_hashing_in[i]);
if (i % 25 == 24) {
PrintMessage(buffer, true);
buffer[0] = '\0';
}
}
PrintMessage(buffer,true);
*/
// Print a histogram of how many items are hashed into each slot. Shows
// if average number of items hashing into each slot is low.
std::sort(num_hashing_in, num_hashing_in + table_size);
int count = 1;
unsigned previous = num_hashing_in[0];
sprintf(buffer, "Num items hashing into a slot:\t");
PrintMessage(buffer);
for (unsigned i = 1; i < table_size; ++i) {
if (num_hashing_in[i] != previous) {
sprintf(buffer, "\t(%u, %u)", previous, count);
PrintMessage(buffer);
previous = num_hashing_in[i];
count = 1;
} else {
count++;
}
}
sprintf(buffer, "\t(%u, %u)", previous, count);
PrintMessage(buffer);
delete[] num_hashing_in;
CUDA_SAFE_CALL(cudaFree(d_num_hashing_in));
#endif
#ifdef COUNT_HOW_MANY_HAVE_CYCLES
unsigned *num_slots_available = new unsigned[num_keys];
CUDA_SAFE_CALL(cudaMemcpy(num_slots_available, d_num_slots_available,
sizeof(unsigned) * num_keys,
cudaMemcpyDeviceToHost));
static const unsigned kHistogramSize = kNumHashFunctions + 1;
unsigned *histogram = new unsigned[kHistogramSize];
memset(histogram, 0, sizeof(unsigned) * kHistogramSize);
for (unsigned i = 0; i < num_keys; ++i) {
histogram[num_slots_available[i]]++;
}
sprintf(buffer, "Slots assigned to each key: ");
for (unsigned i = 1; i < kHistogramSize; ++i) {
sprintf(buffer, "%s(%u, %u) ", buffer, i, histogram[i]);
}
PrintMessage(buffer);
delete[] histogram;
delete[] num_slots_available;
CUDA_SAFE_CALL(cudaFree(d_num_slots_available));
#endif
}
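//! Returns true if some key is assigned fewer than N distinct slots, i.e. two
//! or more of its N hash functions map that key to the same table slot.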
bool CheckAssignedSameSlot(const unsigned N,
const unsigned num_keys,
const unsigned long long *d_keys,
const unsigned table_size,
uint2 *constants)
{
unsigned *d_cycle_exists = NULL;
uint2 *d_constants = NULL;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_cycle_exists, sizeof(unsigned)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_constants, sizeof(uint2) * N));
CUDA_SAFE_CALL(cudaMemset(d_cycle_exists, 0, sizeof(unsigned)));
CUDA_SAFE_CALL(cudaMemcpy(d_constants, constants, sizeof(uint2) * N,
cudaMemcpyHostToDevice));
// Check if all keys were given a full set of N slots by the functions.
take_hash_function_statistics_kernel<<<ComputeGridDim(num_keys),
kBlockSize>>>(
d_keys, num_keys, table_size, d_constants, N, NULL, NULL,
d_cycle_exists);
unsigned cycle_exists;
CUDA_SAFE_CALL(cudaMemcpy(&cycle_exists, d_cycle_exists,
sizeof(unsigned), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_cycle_exists));
CUDA_SAFE_CALL(cudaFree(d_constants));
return (cycle_exists != 0);
}
void PrintStashContents(const Entry *d_stash)
{
Entry *stash = new Entry[CudaHT::CuckooHashing::kStashSize];
CUDA_SAFE_CALL(cudaMemcpy(
stash, d_stash, sizeof(Entry) * CudaHT::CuckooHashing::kStashSize,
cudaMemcpyDeviceToHost));
for (unsigned i = 0; i < CudaHT::CuckooHashing::kStashSize; ++i) {
if (get_key(stash[i]) != kKeyEmpty) {
char buffer[256];
sprintf(buffer, "Stash[%u]: %u = %u", i,
get_key(stash[i]), get_value(stash[i]));
PrintMessage(buffer, true);
}
}
delete[] stash;
}
}; // namespace CuckooHashing
}; // namespace CudaHT
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
73abd1eaeddc94894095a24d79099e62485c1600.hip | // !!! This is a file automatically generated by hipify!!!
// nvcc -O3 -std=c++11 -use_fast_math -ccbin g++ -arch=compute_75 -code=sm_75 -expt-relaxed-constexpr
// Performs matrix multiplication using shared memory tiles where each thread
// may need to calculate and move more than one data element. Assumes matrices
// are stored in row-major order. The loop structure followed is as follows
// (one-level tiling):
//
// for(int i = 0; i < M; i += Mtile){ //Inter-tb-Tile
// for(int j = 0; j < N; j += Ntile){ //Inter-tb-Tile
// for(int k = 0; k < K; k += Ktile){ //Inter-tb-Tile
// for(int ii = i; ii < i + Mtile; ii += WarpMtile){ //Inter-warp-Tile
// for(int jj = j; jj < j + Ntile; jj += WarpNtile){ //Inter-warp-Tile
// for(int kk = k; kk < k + Ktile; kk += WarpKtile){ //Inter-warp-Tile
// for(int iii = ii; iii < ii + WarpMtile; ++iii){ //Intra-warp-Tile
// for(int jjj = jj; jjj < jj + WarpNtile; ++jjj){ //Intra-warp-Tile
// //mma.sync()
// }
// }
// }
// }
// }
// }
// }
// }
// }
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_fp16.h>
#include <mma.h>
#include <assert.h>
#include "common.h"
#define DTYPECD float
#define DTYPEAB __half
#define M 4096
#define N 4096
#define K 4096
#define WM 16
#define WN 16
#define WK 16
#define Mtile 64 // This will actually be the loop step of `i` loop.
#define Ntile 64 // This will actually be the loop step of `j` loop.
#define Ktile 16 // This will actually be the loop step of `k` loop.
#define WarpMtile 32
#define WarpNtile 32
#define WarpKtile 16 // 16 because the size supported by the wmma api is 16x16x16.
#define WarpSize 32
#define NUM_THREADS_PER_BLOCK (Mtile / WarpMtile) * (Ntile / WarpNtile) * WarpSize
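// With the tile sizes above this evaluates to (64 / 32) * (64 / 32) * 32 = 128
// threads per block, i.e. 4 warps.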
#define PADDING_AB 8
#define MBLOCK 32
#define NBLOCK 32
#define C_LAYOUT wmma::mem_row_major
using namespace nvcuda;
// Since the thread block tile computes a 64x64 output tile and one warp tile
// computes 32x32, we only need 4 warps, i.e., 32 x 4 = 128 threads. A thread
// block of the kind 32 x 4 etc. is not possible here, because we want every
// thread block dimension to be a multiple of 32, which 4 clearly isn't. So we
// launch all 128 threads in one dimension only; a fair amount of refactoring
// is needed for that.
using namespace std;
typedef struct {
unsigned x;
unsigned y;
unsigned z;
} WarpIdx;
__host__ void init_host_matrices(DTYPEAB *a, DTYPEAB *b, DTYPECD *c){
for(int i = 0; i < M; i++){
for(int j = 0; j < K; j++){
//a[i * K + j] = __float2half(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10);
a[i * K + j] = __float2half(static_cast<float>(static_cast<int>(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10) + 1));
//a[i * K + j] = __float2half(1.0f);
}
}
for(int i = 0; i < K; i++){
for (int j = 0; j < N; j++){
//b[i * N + j] = __float2half(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10);
b[i * N + j] = __float2half(static_cast<float>(static_cast<int>(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10) + 1));
//b[i * N + j] = __float2half(1.0f);
}
}
for (int t = 0; t < M * N; t++) {
c[t] = (DTYPECD) 0.0f;
}
}
__host__ void printMatrix(DTYPEAB * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%f ", __half2float((matrix[i * n + j])));
}
printf("\n");
}
printf("\n");
}
__host__ void printMatrixFloat(DTYPECD * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%f ", matrix[i * n + j]);
}
printf("\n");
}
printf("\n");
}
__global__ void GEMM_NAIVE(DTYPEAB * a, DTYPEAB * b, DTYPECD * c, int m, int n, int k){
int i_iter = blockIdx.y * blockDim.y + threadIdx.y;
int j_iter = blockIdx.x * blockDim.x + threadIdx.x;
DTYPECD temp = 0.0f;
for(int kk = 0; kk < k; ++kk){
if(i_iter < m && j_iter < n){
temp += __half2float(a[i_iter * k + kk]) * __half2float(b[kk * n + j_iter]);
}
}
c[i_iter * n + j_iter] = temp;
}
__global__ void GEMM(DTYPEAB * a, DTYPEAB * b, DTYPECD * c, DTYPECD * d, int m, int n, int k){
// Struct holding the geometrical coordinates of the warp.
WarpIdx warpIdx;
int numThreads = blockDim.x * blockDim.y * blockDim.z;
int numWarpsInM = 1, numWarpsInN = 1, numWarps = numThreads / WarpSize;
  // Number of warps in the `M` and `N` dimensions of the thread block. If there are
  // not enough warps to cover the `N` dimension, assign all of them to `N`. If there
  // are more than enough, give `N` as many as it needs and assign the remaining
  // warps to the `M` dimension.
if(numWarps <= Ntile / WarpNtile){
numWarpsInN = numWarps;
}else{
numWarpsInN = Ntile / WarpNtile;
numWarpsInM = numWarps / numWarpsInN;
}
// Reserve shared memory tiles for the operands.
__shared__ DTYPEAB asmem[Mtile * Ktile];
__shared__ DTYPEAB bsmem[Ktile * Ntile];
__shared__ DTYPECD csmem[Mtile * Ntile];
__shared__ DTYPECD dsmem[Mtile * Ntile];
// Linear thread id in the thread block.
int linear_tid = (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
//printf("%d \n", linear_tid);
  // Linear warp id in the thread block.
int linear_warpid = linear_tid / WarpSize;
//printf("%d \n", linear_warpid);
warpIdx.x = linear_warpid % numWarpsInN;
warpIdx.y = linear_warpid / numWarpsInN;
warpIdx.z = 1;
//printf("%d %d \n", warpIdx.y, warpIdx.x);
// Find the iteration of the original loop nest that maps to this thread
// block here.
// It is more elegant to map the iterations instead of row or col. At the end
  // it doesn't matter because the iterations actually determine which row or
  // col it is.
// The Outer loops iteration beginning that this thread block tile
// is responsible for. These coordinates also marks the beginning of the
// address a thread block needs to copy form the global memory to shared
// memory. This represents the coordinates in the data space not in the GPU
// (processor) space.
int i_iter_tile_base = blockIdx.y * Mtile; // Maps to inter-tile `i`. Global row
  int j_iter_tile_base = blockIdx.x * Ntile; // Maps to inter-tile `j`. Global col
DTYPECD *c_tb_tile_base = c;
DTYPECD *d_tb_tile_base = d;
DTYPEAB *a_tb_tile_base = a;
DTYPEAB *b_tb_tile_base = b;
DTYPECD *c_tb_tile_offset = c_tb_tile_base + i_iter_tile_base * n + j_iter_tile_base;
DTYPECD *d_tb_tile_offset = d_tb_tile_base + i_iter_tile_base * n + j_iter_tile_base;
DTYPEAB *a_thread_tile_base_copy;
DTYPEAB *b_thread_tile_base_copy;
DTYPECD *c_thread_tile_base_copy = c_tb_tile_offset;
DTYPECD *d_thread_tile_base_copy = d_tb_tile_offset;
DTYPEAB *a_tb_tile_offset;
DTYPEAB *b_tb_tile_offset;
DTYPECD *c_warp_tile_base;
DTYPECD *d_warp_tile_base;
DTYPEAB *a_warp_tile_base;
DTYPEAB *b_warp_tile_base;
// c_warp_tile_offset will be in the global memory tile
// while for A and B they will be in the shared memory.
DTYPECD *c_warp_tile_offset;
DTYPECD *d_warp_tile_offset;
DTYPEAB *a_warp_tile_offset_compute;
DTYPEAB *b_warp_tile_offset_compute;
// warp_tile_base for compute is equal to the base address in shared memory.
a_warp_tile_base = &asmem[0];
b_warp_tile_base = &bsmem[0];
c_warp_tile_base = &csmem[0];
//d_warp_tile_base = &dsmem[0];
d_warp_tile_base = d_tb_tile_offset;
// Allocate accmulator fragments for the C warp tiles. The allocation happens at a per
// warp level. Each warp in the thread block will have this type of accumulator tile.
// This accumulator tile is to be kept alive even accross different iterations of the
// outermost k-loop.
wmma::fragment<wmma::accumulator, WM, WN, WK, DTYPECD> c_accum[WarpMtile / WM][WarpNtile / WN];
  // Copy the c matrix into shared memory for scaling.
int4 * cgmemBase = (int4 *)c_thread_tile_base_copy;
int4 * csmemBase = (int4 *)&csmem[0];
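  // Vectorized copy: each int4 moves 16 bytes, i.e. 4 floats of C, which is why
  // the offsets below are expressed in units of Ntile / 4 and N / 4.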
#pragma unroll
for(int i = linear_tid, e = Mtile * (Ntile / 4), x = blockDim.x * blockDim.y; i < e; i+= x){
*(csmemBase + ((i / (Ntile / 4)) * (Ntile / 4)) + (i % (Ntile / 4))) = *(cgmemBase + ((i / (Ntile / 4) * (N / 4)) + (i % (Ntile / 4))));
}
// Wait until all the elements are loaded into the shared memory. After that move them to the registers.
__syncthreads();
// These loops goes over warp tiles of dimension (WarpMtile, WarpNtile) inside the thread block tile.
// Useful when the number of warp tiles is more than the number of warps available. I.e., one warp
  // is responsible for more than one warp tile. Ideally both of these loops will
  // have a single iteration.
for(int i_iter_warp_base = warpIdx.y * WarpMtile; i_iter_warp_base < Mtile; i_iter_warp_base += WarpMtile * numWarpsInM){
for(int j_iter_warp_base = warpIdx.x * WarpNtile; j_iter_warp_base < Ntile; j_iter_warp_base += WarpNtile * numWarpsInN){
//printf("%d %d\n", i_iter_warp_base, j_iter_warp_base).
c_warp_tile_offset = c_warp_tile_base + i_iter_warp_base * Ntile + j_iter_warp_base;
d_warp_tile_offset = d_warp_tile_base + i_iter_warp_base * N + j_iter_warp_base;
// Inter-warp-tile loop. Goes inside a thread block tile in steps of
      // WarpKtile. If WarpKtile is equal to Ktile then the K dimension is not really
      // tiled for the warp. Tiling for the warp may result in reduced register pressure
      // and hence may reduce spills.
      // TODO: Try to see the performance benefits of different tile sizes in the
      // k-dimension for warps.
// TODO: This needs to be fixed. If the WarpKtile is not 16 then the number of
// 16x16 operand tiles that are moved need to be changed. I.e., a_frag and b_frag
// now need to be 2-d arrays and one more loop inside this loop needs to be present
// which calculates the WarpKtile in chunks of 16 each.
// These fragments contain the register operands for the a and b matrix. These
// contain only the operands for calculating one k-dimension.
wmma::fragment<wmma::matrix_a, WM, WN, WK, DTYPEAB, wmma::row_major> a_frag[WarpMtile / WM];
wmma::fragment<wmma::matrix_b, WM, WN, WK, DTYPEAB, wmma::row_major> b_frag;
// We need to copy the corresponding tiles from shared memory to the registers
// in chunks of 16x16 each. This loop is meant to run only once per thread block tile.
wmma::load_matrix_sync(c_accum[0 / WM][0 / WN], c_warp_tile_offset + (0 * Ntile) + 0, Ntile, C_LAYOUT);
wmma::load_matrix_sync(c_accum[0 / WM][16 / WN], c_warp_tile_offset + (0 * Ntile) + 16, Ntile, C_LAYOUT);
wmma::load_matrix_sync(c_accum[16 / WM][0 / WN], c_warp_tile_offset + (16 * Ntile) + 0, Ntile, C_LAYOUT);
wmma::load_matrix_sync(c_accum[16 / WM][16 / WN], c_warp_tile_offset + (16 * Ntile) + 16, Ntile, C_LAYOUT);
// Wait until the accumulator tile is fully loaded.
__syncthreads();
//------Write code for fractional scaling here----//
//
//
//------------------------------------------------//
// K dimension is sequential so this is not mapped to the gpu compute
      // hierarchy. Inter-tile K-loop. Thread block K-loop.
for(int kk = 0; kk < k; kk += Ktile){
//printf("kk:%d\n", kk);
// Base address in global tile of A & B operand thread block tile.
a_tb_tile_offset = a_tb_tile_base + i_iter_tile_base * k + kk;
b_tb_tile_offset = b_tb_tile_base + kk * n + j_iter_tile_base;
a_thread_tile_base_copy = a_tb_tile_offset;
b_thread_tile_base_copy = b_tb_tile_offset;
// Copy the operands from global to shared memory. Each thread copies the
// `chunktocopy` elements from global to shared memory. The thread Id's
        // inside a thread block need to be linearized. Each thread copies its
        // contiguous chunk from global memory to the shared memory.
int4 * agmemBase = (int4 *)a_thread_tile_base_copy;
int4 * asmemBase = (int4 *)&asmem[0];
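        // For the __half operands an int4 moves 8 elements at a time, hence the
        // Ktile / 8 and K / 8 strides here (and Ntile / 8, N / 8 for the B tile below).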
#pragma unroll
for(int i = linear_tid, e = Mtile * (Ktile / 8), x = numThreads; i < e; i+= x){
*(asmemBase + ((i / (Ktile / 8)) * (Ktile / 8)) + (i % (Ktile / 8))) = *(agmemBase + ((i / (Ktile / 8) * (K / 8)) + (i % (Ktile / 8))));
}
int4 * bgmemBase = (int4 *)b_thread_tile_base_copy;
int4 * bsmemBase = (int4 *)&bsmem[0];
#pragma unroll
for(int i = linear_tid, e = Ktile * (Ntile / 8), x = numThreads; i < e; i+= x){
*(bsmemBase + ((i / (Ntile / 8)) * (Ntile / 8)) + (i % (Ntile / 8))) = *(bgmemBase + ((i / (Ntile / 8) * (N / 8)) + (i % (Ntile / 8))));
}
__syncthreads();
#pragma unroll
for(int kkk = 0; kkk < Ktile; kkk += WarpKtile){
if(kkk < k){
//printf("kk:%d kkk:%d kk+Ktile:%d\n", kk, kkk, kk + Ktile);
// Warp tile offset of asmem will only be dependent on the warpIdx.y i.e.,
// the row of the warp which is computing this particular part. This points to the
// starting address of this warp for the `a` operand.
a_warp_tile_offset_compute = a_warp_tile_base + (i_iter_warp_base * Ktile) + kkk;
// Warp tile offset of bsmem will only be dependent on the warpIdx.x i.e.,
// the col of the warp which is computing this particular part. This points to the
// starting address of this warp for the `b` operand.
b_warp_tile_offset_compute = b_warp_tile_base + (kkk * Ntile) + (j_iter_warp_base);
// The below loop nest was obtained by unroll-jamming the I loop by a factor of 2.
          // It gives the same benefits as the if conditional in the original code.
//#pragma unroll
for(int i = 0; i < WarpMtile; i += WarpMtile){
// Load A fragments. The loads below can be represented by two different SSA values in MLIR.
wmma::load_matrix_sync(a_frag[i / WM], a_warp_tile_offset_compute + (i * Ktile), Ktile);
wmma::load_matrix_sync(a_frag[(i + WM) / WM], a_warp_tile_offset_compute + ((i + WM) * Ktile), Ktile);
//#pragma unroll
for(int j = 0; j < WarpNtile; j += WarpNtile){
// Load B fragments. The load below can be represented by one SSA value in MLIR.
wmma::load_matrix_sync(b_frag, b_warp_tile_offset_compute + j, Ntile);
// call mma.sync();
wmma::mma_sync(c_accum[i/ WM][j / WN], a_frag[i / WM], b_frag, c_accum[i / WM][j / WN]);
wmma::mma_sync(c_accum[(i + WM)/ WM][j / WN], a_frag[(i + WM) / WM], b_frag, c_accum[(i + WM) / WM][j / WN]);
//---------------------------------unrolled iteration-------------------------//
// Load B fragments. The load below can be represented by one SSA value in MLIR.
wmma::load_matrix_sync(b_frag, b_warp_tile_offset_compute + (j + WN), Ntile);
// call mma.sync();
wmma::mma_sync(c_accum[i/ WM][(j + WN) / WN], a_frag[i / WM], b_frag, c_accum[i / WM][(j + WN) / WN]);
wmma::mma_sync(c_accum[(i + WM)/ WM][(j + WN) / WN], a_frag[(i + WM) / WM], b_frag, c_accum[(i + WM) / WM][(j + WN) / WN]);
}
}
// Sync before moving data for next iteration into the register tiles.
//__syncthreads();
}
}
// Sync before copying data for next thread block tile.
__syncthreads();
}
}
}
// K-dimension processing of one warp is finished. We can copy the accum fragment
// corresponding to this warp to the result array `d` in global memory.
// TODO: currently assuming that one warp tile is only mapped to one warp, i.e.,
// one warp needs to calculate only one warp tile. Hence d_warp_tile_offset is known
  // from the last time it was calculated and hence reused here.
#pragma unroll
for(int i = 0; i < WarpMtile; i += WM){
#pragma unroll
for(int j = 0; j < WarpNtile; j += WN){
wmma::store_matrix_sync(d_warp_tile_offset + ((i * N) + j), c_accum[i / WM][j/ WN], N, C_LAYOUT);
}
}
// Write the D matrix into the global memory.
//int4 * dgmemBase = (int4 *)d_thread_tile_base_copy;
//int4 * dsmemBase = (int4 *)&dsmem[0];
//#pragma unroll
//for(int i = linear_tid, e = Mtile * (Ntile / 4), x = blockDim.x * blockDim.y; i < e; i+= x){
// *(dgmemBase + ((i / (N / 4) * (N / 4)) + (i % (N / 4)))) = *(dsmemBase + ((i / (Ntile / 4)) * (Ntile / 4)) + (i % (Ntile / 4)));
//}
}
void hostGEMM(DTYPEAB * a, DTYPEAB * b, DTYPECD * c, int m, int n, int k){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j ){
DTYPECD temp = 0.0f;
for(int kk = 0; kk < k ; ++kk){
temp += __half2float(a[i * k + kk]) * __half2float(b[kk * n + j]);
}
c[i * n + j] = temp;
}
}
}
void compareGEMM(DTYPECD * h_c, DTYPECD * h_c_gpu_res, int m, int n){
int counter = 0;
  for (int i = 0; i < m * n; i++) {
if(fabs(h_c_gpu_res[i] - h_c[i]) > 0.5f){
printf("DEBUG_GPU: mismatch i=%d result_Device=%f result_host=%f\n", i, h_c_gpu_res[i], h_c[i]);
counter++;
}
}
if(counter != 0)
printf("DEBUG_CPU: Output does not match!: %d, %d\n", counter, m * n);
else
printf("DEBUG_CPU: Output matches!\n");
}
__global__ void compareGEMMOnDevice(DTYPECD * d_d, DTYPECD * d_d_naive, int m, int n){
int counter = 0;
for (int i = 0; i < m * n; i++) {
if(fabs(d_d[i] - d_d_naive[i]) > 0.5f){
//printf("DEBUG_CPU: mismatch i=%d result_opt=%f result_niave=%f difference=%f\n", i, d_d[i], d_d_naive[i], fabs(d_d[i] - d_d_naive[i]));
++counter;
}
}
if(counter != 0)
printf("DEBUG_GPU: Output does not match!: %d, %d\n", counter, m * n);
else
printf("DEBUG_GPU: Output matches!\n");
}
int main(){
DTYPEAB *d_a, *d_b, *h_a, *h_b;
DTYPECD *d_c, *d_d, *h_c, *h_c_gpu_res, *d_c_naive;
int m ,n, k;
m = M;
n = N;
k = K;
h_a = (DTYPEAB*) malloc(m * k * sizeof(DTYPEAB));
h_b = (DTYPEAB*) malloc(k * n * sizeof(DTYPEAB));
h_c = (DTYPECD*) malloc(m * n * sizeof(DTYPECD));
h_c_gpu_res = (DTYPECD*) malloc(m * n * sizeof(DTYPECD));
check_cuda_error(hipMalloc(&d_a, m * k * sizeof(DTYPEAB)));
check_cuda_error(hipMalloc(&d_b, k * n * sizeof(DTYPEAB)));
check_cuda_error(hipMalloc(&d_c, m * n * sizeof(DTYPECD)));
check_cuda_error(hipMalloc(&d_d, m * n * sizeof(DTYPECD)));
check_cuda_error(hipMalloc(&d_c_naive, m * n * sizeof(DTYPECD)));
assert(((unsigned long long)d_a) % 128 == 0);
assert(((unsigned long long)d_b) % 128 == 0);
assert(((unsigned long long)d_c) % 128 == 0);
assert(((unsigned long long)d_d) % 128 == 0);
init_host_matrices(h_a, h_b, h_c);
check_cuda_error(hipMemcpy(d_a, h_a, m * k * sizeof(DTYPEAB), hipMemcpyHostToDevice));
check_cuda_error(hipMemcpy(d_b, h_b, k * n * sizeof(DTYPEAB), hipMemcpyHostToDevice));
check_cuda_error(hipMemcpy(d_c, h_c, m * n * sizeof(DTYPECD), hipMemcpyHostToDevice));
dim3 block(NUM_THREADS_PER_BLOCK, 1, 1);
dim3 grid((n + Ntile - 1) / Ntile, (m + Mtile - 1) / Mtile, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
hipLaunchKernelGGL(( GEMM), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, d_d, m , n, k);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
double flopsPerMatrixMul = 2.0 * (double) m * (double) n * (double) k;
double teraFlops = (flopsPerMatrixMul * 1.0e-12f) / (msecTotal / 1000.0f);
cout<<"PERF: "<<teraFlops<<"Tflops \n";
check_cuda_error(hipPeekAtLastError());
check_cuda_error(hipDeviceSynchronize());
hipMemcpy(h_c_gpu_res, d_d, m * n * sizeof(DTYPECD), hipMemcpyDeviceToHost);
#ifdef DEBUG_GPU
dim3 block2(NBLOCK, MBLOCK, 1);
dim3 grid2((n + NBLOCK - 1) / NBLOCK, (m + MBLOCK - 1) / MBLOCK, 1);
hipLaunchKernelGGL(( GEMM_NAIVE), dim3(grid2), dim3(block2), 0, 0, d_a, d_b, d_c_naive, m , n, k);
check_cuda_error(hipPeekAtLastError());
check_cuda_error(hipDeviceSynchronize());
hipLaunchKernelGGL(( compareGEMMOnDevice), dim3(1), dim3(1), 0, 0, d_d, d_c_naive, m, n);
#endif
#ifdef DEBUG
hostGEMM(h_a, h_b, h_c, m, n, k);
compareGEMM(h_c, h_c_gpu_res, m, n);
#endif
#ifdef PRINT_HOST
printMatrixFloat(h_c, m, n);
#endif
#ifdef PRINT_GPU
printMatrixFloat(h_c_gpu_res, m, n);
#endif
free(h_a);
free(h_b);
free(h_c);
free(h_c_gpu_res);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_d);
return 0;
}
| 73abd1eaeddc94894095a24d79099e62485c1600.cu | // nvcc -O3 -std=c++11 -use_fast_math -ccbin g++ -arch=compute_75 -code=sm_75 -expt-relaxed-constexpr
// Performs matrix multiplication using shared memory tiles where each thread
// may need to calculate and move more than one data element. Assumes matrices
// are stored in row-major order. The loop structure followed is as follows
// (one-level tiling):
//
// for(int i = 0; i < M; i += Mtile){ //Inter-tb-Tile
// for(int j = 0; j < N; j += Ntile){ //Inter-tb-Tile
// for(int k = 0; k < K; k += Ktile){ //Inter-tb-Tile
// for(int ii = i; ii < i + Mtile; ii += WarpMtile){ //Inter-warp-Tile
// for(int jj = j; jj < j + Ntile; jj += WarpNtile){ //Inter-warp-Tile
// for(int kk = k; kk < k + Ktile; kk += WarpKtile){ //Inter-warp-Tile
// for(int iii = ii; iii < ii + WarpMtile; ++iii){ //Intra-warp-Tile
// for(int jjj = jj; jjj < jj + WarpNtile; ++jjj){ //Intra-warp-Tile
// //mma.sync()
// }
// }
// }
// }
// }
// }
// }
// }
// }
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <cuda_fp16.h>
#include <mma.h>
#include <assert.h>
#include "common.h"
#define DTYPECD float
#define DTYPEAB __half
#define M 4096
#define N 4096
#define K 4096
#define WM 16
#define WN 16
#define WK 16
#define Mtile 64 // This will actually be the loop step of `i` loop.
#define Ntile 64 // This will actually be the loop step of `j` loop.
#define Ktile 16 // This will actually be the loop step of `k` loop.
#define WarpMtile 32
#define WarpNtile 32
#define WarpKtile 16 // 16 because the size supported by the wmma api is 16x16x16.
#define WarpSize 32
#define NUM_THREADS_PER_BLOCK (Mtile / WarpMtile) * (Ntile / WarpNtile) * WarpSize
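// With the tile sizes above this evaluates to (64 / 32) * (64 / 32) * 32 = 128
// threads per block, i.e. 4 warps.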
#define PADDING_AB 8
#define MBLOCK 32
#define NBLOCK 32
#define C_LAYOUT wmma::mem_row_major
using namespace nvcuda;
// Since the thread block tile computes a 64x64 output tile and one warp tile
// computes 32x32, we only need 4 warps, i.e., 32 x 4 = 128 threads. A thread
// block of the kind 32 x 4 etc. is not possible here, because we want every
// thread block dimension to be a multiple of 32, which 4 clearly isn't. So we
// launch all 128 threads in one dimension only; a fair amount of refactoring
// is needed for that.
using namespace std;
typedef struct {
unsigned x;
unsigned y;
unsigned z;
} WarpIdx;
__host__ void init_host_matrices(DTYPEAB *a, DTYPEAB *b, DTYPECD *c){
for(int i = 0; i < M; i++){
for(int j = 0; j < K; j++){
//a[i * K + j] = __float2half(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10);
a[i * K + j] = __float2half(static_cast<float>(static_cast<int>(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10) + 1));
//a[i * K + j] = __float2half(1.0f);
}
}
for(int i = 0; i < K; i++){
for (int j = 0; j < N; j++){
//b[i * N + j] = __float2half(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10);
b[i * N + j] = __float2half(static_cast<float>(static_cast<int>(static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 10) + 1));
//b[i * N + j] = __float2half(1.0f);
}
}
for (int t = 0; t < M * N; t++) {
c[t] = (DTYPECD) 0.0f;
}
}
__host__ void printMatrix(DTYPEAB * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%f ", __half2float((matrix[i * n + j])));
}
printf("\n");
}
printf("\n");
}
__host__ void printMatrixFloat(DTYPECD * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%f ", matrix[i * n + j]);
}
printf("\n");
}
printf("\n");
}
__global__ void GEMM_NAIVE(DTYPEAB * a, DTYPEAB * b, DTYPECD * c, int m, int n, int k){
int i_iter = blockIdx.y * blockDim.y + threadIdx.y;
int j_iter = blockIdx.x * blockDim.x + threadIdx.x;
DTYPECD temp = 0.0f;
for(int kk = 0; kk < k; ++kk){
if(i_iter < m && j_iter < n){
temp += __half2float(a[i_iter * k + kk]) * __half2float(b[kk * n + j_iter]);
}
}
c[i_iter * n + j_iter] = temp;
}
__global__ void GEMM(DTYPEAB * a, DTYPEAB * b, DTYPECD * c, DTYPECD * d, int m, int n, int k){
// Struct holding the geometrical coordinates of the warp.
WarpIdx warpIdx;
int numThreads = blockDim.x * blockDim.y * blockDim.z;
int numWarpsInM = 1, numWarpsInN = 1, numWarps = numThreads / WarpSize;
  // Number of warps in the `M` and `N` dimensions of the thread block. If there are
  // not enough warps to cover the `N` dimension, assign all of them to `N`. If there
  // are more than enough, give `N` as many as it needs and assign the remaining
  // warps to the `M` dimension.
if(numWarps <= Ntile / WarpNtile){
numWarpsInN = numWarps;
}else{
numWarpsInN = Ntile / WarpNtile;
numWarpsInM = numWarps / numWarpsInN;
}
// Reserve shared memory tiles for the operands.
__shared__ DTYPEAB asmem[Mtile * Ktile];
__shared__ DTYPEAB bsmem[Ktile * Ntile];
__shared__ DTYPECD csmem[Mtile * Ntile];
__shared__ DTYPECD dsmem[Mtile * Ntile];
// Linear thread id in the thread block.
int linear_tid = (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
//printf("%d \n", linear_tid);
  // Linear warp id in the thread block.
int linear_warpid = linear_tid / WarpSize;
//printf("%d \n", linear_warpid);
warpIdx.x = linear_warpid % numWarpsInN;
warpIdx.y = linear_warpid / numWarpsInN;
warpIdx.z = 1;
//printf("%d %d \n", warpIdx.y, warpIdx.x);
// Find the iteration of the original loop nest that maps to this thread
// block here.
// It is more elegant to map the iterations instead of row or col. At the end
  // it doesn't matter because the iterations actually determine which row or
  // col it is.
// The Outer loops iteration beginning that this thread block tile
// is responsible for. These coordinates also marks the beginning of the
// address a thread block needs to copy form the global memory to shared
// memory. This represents the coordinates in the data space not in the GPU
// (processor) space.
int i_iter_tile_base = blockIdx.y * Mtile; // Maps to inter-tile `i`. Global row
  int j_iter_tile_base = blockIdx.x * Ntile; // Maps to inter-tile `j`. Global col
DTYPECD *c_tb_tile_base = c;
DTYPECD *d_tb_tile_base = d;
DTYPEAB *a_tb_tile_base = a;
DTYPEAB *b_tb_tile_base = b;
DTYPECD *c_tb_tile_offset = c_tb_tile_base + i_iter_tile_base * n + j_iter_tile_base;
DTYPECD *d_tb_tile_offset = d_tb_tile_base + i_iter_tile_base * n + j_iter_tile_base;
DTYPEAB *a_thread_tile_base_copy;
DTYPEAB *b_thread_tile_base_copy;
DTYPECD *c_thread_tile_base_copy = c_tb_tile_offset;
DTYPECD *d_thread_tile_base_copy = d_tb_tile_offset;
DTYPEAB *a_tb_tile_offset;
DTYPEAB *b_tb_tile_offset;
DTYPECD *c_warp_tile_base;
DTYPECD *d_warp_tile_base;
DTYPEAB *a_warp_tile_base;
DTYPEAB *b_warp_tile_base;
// c_warp_tile_offset will be in the global memory tile
// while for A and B they will be in the shared memory.
DTYPECD *c_warp_tile_offset;
DTYPECD *d_warp_tile_offset;
DTYPEAB *a_warp_tile_offset_compute;
DTYPEAB *b_warp_tile_offset_compute;
// warp_tile_base for compute is equal to the base address in shared memory.
a_warp_tile_base = &asmem[0];
b_warp_tile_base = &bsmem[0];
c_warp_tile_base = &csmem[0];
//d_warp_tile_base = &dsmem[0];
d_warp_tile_base = d_tb_tile_offset;
// Allocate accmulator fragments for the C warp tiles. The allocation happens at a per
// warp level. Each warp in the thread block will have this type of accumulator tile.
// This accumulator tile is to be kept alive even accross different iterations of the
// outermost k-loop.
wmma::fragment<wmma::accumulator, WM, WN, WK, DTYPECD> c_accum[WarpMtile / WM][WarpNtile / WN];
  // Copy the c matrix into shared memory for scaling.
int4 * cgmemBase = (int4 *)c_thread_tile_base_copy;
int4 * csmemBase = (int4 *)&csmem[0];
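  // Vectorized copy: each int4 moves 16 bytes, i.e. 4 floats of C, which is why
  // the offsets below are expressed in units of Ntile / 4 and N / 4.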
#pragma unroll
for(int i = linear_tid, e = Mtile * (Ntile / 4), x = blockDim.x * blockDim.y; i < e; i+= x){
*(csmemBase + ((i / (Ntile / 4)) * (Ntile / 4)) + (i % (Ntile / 4))) = *(cgmemBase + ((i / (Ntile / 4) * (N / 4)) + (i % (Ntile / 4))));
}
// Wait until all the elements are loaded into the shared memory. After that move them to the registers.
__syncthreads();
// These loops goes over warp tiles of dimension (WarpMtile, WarpNtile) inside the thread block tile.
// Useful when the number of warp tiles is more than the number of warps available. I.e., one warp
  // is responsible for more than one warp tile. Ideally both of these loops will
  // have a single iteration.
for(int i_iter_warp_base = warpIdx.y * WarpMtile; i_iter_warp_base < Mtile; i_iter_warp_base += WarpMtile * numWarpsInM){
for(int j_iter_warp_base = warpIdx.x * WarpNtile; j_iter_warp_base < Ntile; j_iter_warp_base += WarpNtile * numWarpsInN){
//printf("%d %d\n", i_iter_warp_base, j_iter_warp_base).
c_warp_tile_offset = c_warp_tile_base + i_iter_warp_base * Ntile + j_iter_warp_base;
d_warp_tile_offset = d_warp_tile_base + i_iter_warp_base * N + j_iter_warp_base;
// Inter-warp-tile loop. Goes inside a thread block tile in steps of
      // WarpKtile. If WarpKtile is equal to Ktile then the K dimension is not really
      // tiled for the warp. Tiling for the warp may result in reduced register pressure
      // and hence may reduce spills.
      // TODO: Try to see the performance benefits of different tile sizes in the
      // k-dimension for warps.
// TODO: This needs to be fixed. If the WarpKtile is not 16 then the number of
// 16x16 operand tiles that are moved need to be changed. I.e., a_frag and b_frag
// now need to be 2-d arrays and one more loop inside this loop needs to be present
// which calculates the WarpKtile in chunks of 16 each.
// These fragments contain the register operands for the a and b matrix. These
// contain only the operands for calculating one k-dimension.
wmma::fragment<wmma::matrix_a, WM, WN, WK, DTYPEAB, wmma::row_major> a_frag[WarpMtile / WM];
wmma::fragment<wmma::matrix_b, WM, WN, WK, DTYPEAB, wmma::row_major> b_frag;
// We need to copy the corresponding tiles from shared memory to the registers
// in chunks of 16x16 each. This loop is meant to run only once per thread block tile.
wmma::load_matrix_sync(c_accum[0 / WM][0 / WN], c_warp_tile_offset + (0 * Ntile) + 0, Ntile, C_LAYOUT);
wmma::load_matrix_sync(c_accum[0 / WM][16 / WN], c_warp_tile_offset + (0 * Ntile) + 16, Ntile, C_LAYOUT);
wmma::load_matrix_sync(c_accum[16 / WM][0 / WN], c_warp_tile_offset + (16 * Ntile) + 0, Ntile, C_LAYOUT);
wmma::load_matrix_sync(c_accum[16 / WM][16 / WN], c_warp_tile_offset + (16 * Ntile) + 16, Ntile, C_LAYOUT);
// Wait until the accumulator tile is fully loaded.
__syncthreads();
//------Write code for fractional scaling here----//
//
//
//------------------------------------------------//
// K dimension is sequential so this is not mapped to the gpu compute
      // hierarchy. Inter-tile K-loop. Thread block K-loop.
for(int kk = 0; kk < k; kk += Ktile){
//printf("kk:%d\n", kk);
// Base address in global tile of A & B operand thread block tile.
a_tb_tile_offset = a_tb_tile_base + i_iter_tile_base * k + kk;
b_tb_tile_offset = b_tb_tile_base + kk * n + j_iter_tile_base;
a_thread_tile_base_copy = a_tb_tile_offset;
b_thread_tile_base_copy = b_tb_tile_offset;
// Copy the operands from global to shared memory. Each thread copies the
// `chunktocopy` elements from global to shared memory. The thread Id's
        // inside a thread block need to be linearized. Each thread copies its
        // contiguous chunk from global memory to the shared memory.
int4 * agmemBase = (int4 *)a_thread_tile_base_copy;
int4 * asmemBase = (int4 *)&asmem[0];
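        // For the __half operands an int4 moves 8 elements at a time, hence the
        // Ktile / 8 and K / 8 strides here (and Ntile / 8, N / 8 for the B tile below).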
#pragma unroll
for(int i = linear_tid, e = Mtile * (Ktile / 8), x = numThreads; i < e; i+= x){
*(asmemBase + ((i / (Ktile / 8)) * (Ktile / 8)) + (i % (Ktile / 8))) = *(agmemBase + ((i / (Ktile / 8) * (K / 8)) + (i % (Ktile / 8))));
}
int4 * bgmemBase = (int4 *)b_thread_tile_base_copy;
int4 * bsmemBase = (int4 *)&bsmem[0];
#pragma unroll
for(int i = linear_tid, e = Ktile * (Ntile / 8), x = numThreads; i < e; i+= x){
*(bsmemBase + ((i / (Ntile / 8)) * (Ntile / 8)) + (i % (Ntile / 8))) = *(bgmemBase + ((i / (Ntile / 8) * (N / 8)) + (i % (Ntile / 8))));
}
__syncthreads();
#pragma unroll
for(int kkk = 0; kkk < Ktile; kkk += WarpKtile){
if(kkk < k){
//printf("kk:%d kkk:%d kk+Ktile:%d\n", kk, kkk, kk + Ktile);
// Warp tile offset of asmem will only be dependent on the warpIdx.y i.e.,
// the row of the warp which is computing this particular part. This points to the
// starting address of this warp for the `a` operand.
a_warp_tile_offset_compute = a_warp_tile_base + (i_iter_warp_base * Ktile) + kkk;
// Warp tile offset of bsmem will only be dependent on the warpIdx.x i.e.,
// the col of the warp which is computing this particular part. This points to the
// starting address of this warp for the `b` operand.
b_warp_tile_offset_compute = b_warp_tile_base + (kkk * Ntile) + (j_iter_warp_base);
// The below loop nest was obtained by unroll-jamming the I loop by a factor of 2.
          // It gives the same benefits as the if conditional in the original code.
//#pragma unroll
for(int i = 0; i < WarpMtile; i += WarpMtile){
// Load A fragments. The loads below can be represented by two different SSA values in MLIR.
wmma::load_matrix_sync(a_frag[i / WM], a_warp_tile_offset_compute + (i * Ktile), Ktile);
wmma::load_matrix_sync(a_frag[(i + WM) / WM], a_warp_tile_offset_compute + ((i + WM) * Ktile), Ktile);
//#pragma unroll
for(int j = 0; j < WarpNtile; j += WarpNtile){
// Load B fragments. The load below can be represented by one SSA value in MLIR.
wmma::load_matrix_sync(b_frag, b_warp_tile_offset_compute + j, Ntile);
// call mma.sync();
wmma::mma_sync(c_accum[i/ WM][j / WN], a_frag[i / WM], b_frag, c_accum[i / WM][j / WN]);
wmma::mma_sync(c_accum[(i + WM)/ WM][j / WN], a_frag[(i + WM) / WM], b_frag, c_accum[(i + WM) / WM][j / WN]);
//---------------------------------unrolled iteration-------------------------//
// Load B fragments. The load below can be represented by one SSA value in MLIR.
wmma::load_matrix_sync(b_frag, b_warp_tile_offset_compute + (j + WN), Ntile);
// call mma.sync();
wmma::mma_sync(c_accum[i/ WM][(j + WN) / WN], a_frag[i / WM], b_frag, c_accum[i / WM][(j + WN) / WN]);
wmma::mma_sync(c_accum[(i + WM)/ WM][(j + WN) / WN], a_frag[(i + WM) / WM], b_frag, c_accum[(i + WM) / WM][(j + WN) / WN]);
}
}
// Sync before moving data for next iteration into the register tiles.
//__syncthreads();
}
}
// Sync before copying data for next thread block tile.
__syncthreads();
}
}
}
// K-dimension processing of one warp is finished. We can copy the accum fragment
// corresponding to this warp to the result array `d` in global memory.
// TODO: currently assuming that one warp tile is only mapped to one warp, i.e.,
// one warp needs to calculate only one warp tile. Hence d_warp_tile_offset is known
  // from the last time it was calculated and hence reused here.
#pragma unroll
for(int i = 0; i < WarpMtile; i += WM){
#pragma unroll
for(int j = 0; j < WarpNtile; j += WN){
wmma::store_matrix_sync(d_warp_tile_offset + ((i * N) + j), c_accum[i / WM][j/ WN], N, C_LAYOUT);
}
}
// Write the D matrix into the global memory.
//int4 * dgmemBase = (int4 *)d_thread_tile_base_copy;
//int4 * dsmemBase = (int4 *)&dsmem[0];
//#pragma unroll
//for(int i = linear_tid, e = Mtile * (Ntile / 4), x = blockDim.x * blockDim.y; i < e; i+= x){
// *(dgmemBase + ((i / (N / 4) * (N / 4)) + (i % (N / 4)))) = *(dsmemBase + ((i / (Ntile / 4)) * (Ntile / 4)) + (i % (Ntile / 4)));
//}
}
void hostGEMM(DTYPEAB * a, DTYPEAB * b, DTYPECD * c, int m, int n, int k){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j ){
DTYPECD temp = 0.0f;
for(int kk = 0; kk < k ; ++kk){
temp += __half2float(a[i * k + kk]) * __half2float(b[kk * n + j]);
}
c[i * n + j] = temp;
}
}
}
void compareGEMM(DTYPECD * h_c, DTYPECD * h_c_gpu_res, int m, int n){
int counter = 0;
  for (int i = 0; i < m * n; i++) {
if(fabs(h_c_gpu_res[i] - h_c[i]) > 0.5f){
printf("DEBUG_GPU: mismatch i=%d result_Device=%f result_host=%f\n", i, h_c_gpu_res[i], h_c[i]);
counter++;
}
}
if(counter != 0)
printf("DEBUG_CPU: Output does not match!: %d, %d\n", counter, m * n);
else
printf("DEBUG_CPU: Output matches!\n");
}
__global__ void compareGEMMOnDevice(DTYPECD * d_d, DTYPECD * d_d_naive, int m, int n){
int counter = 0;
for (int i = 0; i < m * n; i++) {
if(fabs(d_d[i] - d_d_naive[i]) > 0.5f){
//printf("DEBUG_CPU: mismatch i=%d result_opt=%f result_niave=%f difference=%f\n", i, d_d[i], d_d_naive[i], fabs(d_d[i] - d_d_naive[i]));
++counter;
}
}
if(counter != 0)
printf("DEBUG_GPU: Output does not match!: %d, %d\n", counter, m * n);
else
printf("DEBUG_GPU: Output matches!\n");
}
int main(){
DTYPEAB *d_a, *d_b, *h_a, *h_b;
DTYPECD *d_c, *d_d, *h_c, *h_c_gpu_res, *d_c_naive;
int m ,n, k;
m = M;
n = N;
k = K;
h_a = (DTYPEAB*) malloc(m * k * sizeof(DTYPEAB));
h_b = (DTYPEAB*) malloc(k * n * sizeof(DTYPEAB));
h_c = (DTYPECD*) malloc(m * n * sizeof(DTYPECD));
h_c_gpu_res = (DTYPECD*) malloc(m * n * sizeof(DTYPECD));
check_cuda_error(cudaMalloc(&d_a, m * k * sizeof(DTYPEAB)));
check_cuda_error(cudaMalloc(&d_b, k * n * sizeof(DTYPEAB)));
check_cuda_error(cudaMalloc(&d_c, m * n * sizeof(DTYPECD)));
check_cuda_error(cudaMalloc(&d_d, m * n * sizeof(DTYPECD)));
check_cuda_error(cudaMalloc(&d_c_naive, m * n * sizeof(DTYPECD)));
assert(((unsigned long long)d_a) % 128 == 0);
assert(((unsigned long long)d_b) % 128 == 0);
assert(((unsigned long long)d_c) % 128 == 0);
assert(((unsigned long long)d_d) % 128 == 0);
init_host_matrices(h_a, h_b, h_c);
check_cuda_error(cudaMemcpy(d_a, h_a, m * k * sizeof(DTYPEAB), cudaMemcpyHostToDevice));
check_cuda_error(cudaMemcpy(d_b, h_b, k * n * sizeof(DTYPEAB), cudaMemcpyHostToDevice));
check_cuda_error(cudaMemcpy(d_c, h_c, m * n * sizeof(DTYPECD), cudaMemcpyHostToDevice));
dim3 block(NUM_THREADS_PER_BLOCK, 1, 1);
dim3 grid((n + Ntile - 1) / Ntile, (m + Mtile - 1) / Mtile, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
GEMM<<<grid, block>>>(d_a, d_b, d_c, d_d, m , n, k);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
double flopsPerMatrixMul = 2.0 * (double) m * (double) n * (double) k;
double teraFlops = (flopsPerMatrixMul * 1.0e-12f) / (msecTotal / 1000.0f);
cout<<"PERF: "<<teraFlops<<"Tflops \n";
check_cuda_error(cudaPeekAtLastError());
check_cuda_error(cudaDeviceSynchronize());
cudaMemcpy(h_c_gpu_res, d_d, m * n * sizeof(DTYPECD), cudaMemcpyDeviceToHost);
#ifdef DEBUG_GPU
dim3 block2(NBLOCK, MBLOCK, 1);
dim3 grid2((n + NBLOCK - 1) / NBLOCK, (m + MBLOCK - 1) / MBLOCK, 1);
GEMM_NAIVE<<<grid2, block2>>>(d_a, d_b, d_c_naive, m , n, k);
check_cuda_error(cudaPeekAtLastError());
check_cuda_error(cudaDeviceSynchronize());
compareGEMMOnDevice<<<1, 1>>>(d_d, d_c_naive, m, n);
#endif
#ifdef DEBUG
hostGEMM(h_a, h_b, h_c, m, n, k);
compareGEMM(h_c, h_c_gpu_res, m, n);
#endif
#ifdef PRINT_HOST
printMatrixFloat(h_c, m, n);
#endif
#ifdef PRINT_GPU
printMatrixFloat(h_c_gpu_res, m, n);
#endif
free(h_a);
free(h_b);
free(h_c);
free(h_c_gpu_res);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_d);
return 0;
}
|
ca7a16eed5fa83e46439b56288d3f013b8c86cca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <vector>

#include "caffe/layers/sigmoid_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = 1. / (1. + exp(-in[index]));
}
}
template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void SigmoidBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype sigmoid_x = out_data[index];
out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);
}
}
template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidLayer);
} // namespace caffe
 | ca7a16eed5fa83e46439b56288d3f013b8c86cca.cu | #include <cmath>
#include <vector>

#include "caffe/layers/sigmoid_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = 1. / (1. + exp(-in[index]));
}
}
template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void SigmoidBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype sigmoid_x = out_data[index];
out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);
}
}
template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidLayer);
} // namespace caffe
|
2ed0f6c2af29e21b4d5e2a51e4598c4f60d21a6a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "EmptyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((EmptyKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
  hipLaunchKernelGGL((EmptyKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  hipLaunchKernelGGL((EmptyKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
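// Since the kernel body is empty, usecs.count() / 1000 approximates the average
// cost of a single kernel launch (in microseconds) for this block/grid shape.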
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2ed0f6c2af29e21b4d5e2a51e4598c4f60d21a6a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "EmptyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
EmptyKernel<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
EmptyKernel<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
EmptyKernel<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
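// Since the kernel body is empty, usecs.count() / 1000 approximates the average
// cost of a single kernel launch (in microseconds) for this block/grid shape.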
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4171a14ac4227b525d32a046ec9257ff7380ad91.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* context
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
* Xiaomi Corporation (author: Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include "k2/csrc/context.h"
#include "k2/csrc/eval.h"
namespace k2 {
RegionPtr NewRegion(ContextPtr &context, std::size_t num_bytes) {
// .. fairly straightforward. Sets bytes_used to num_bytes, caller can
// overwrite if needed.
auto ans = std::make_shared<Region>();
ans->context = context;
// TODO(haowen): deleter_context is always null with above constructor,
// we need add another constructor of Region to allow the caller
// to provide deleter_context.
ans->data = context->Allocate(num_bytes, &ans->deleter_context);
ans->num_bytes = num_bytes;
ans->bytes_used = num_bytes;
return ans;
}
ParallelRunner::ParallelRunner(ContextPtr c) : c_(c) {
if (c_->GetDeviceType() == kCuda) {
auto ret = hipEventCreate(&event_);
K2_CHECK_CUDA_ERROR(ret);
    // Record an event on `c_->GetCudaStream()`; it will be waited on in `NewStream()`.
ret = hipEventRecord(event_, c_->GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
}
}
hipStream_t ParallelRunner::NewStream() {
DeviceType d = c_->GetDeviceType();
if (d == kCpu) {
return kCudaStreamInvalid;
} else {
K2_CHECK_EQ(d, kCuda);
hipStream_t stream;
auto ret = hipStreamCreate(&stream);
K2_CHECK_CUDA_ERROR(ret);
streams_.push_back(stream);
ret = hipStreamWaitEvent(stream, event_, 0);
K2_CHECK_CUDA_ERROR(ret);
return stream;
}
}
void ParallelRunner::Finish() {
if (c_.get() == nullptr)
return;
if (c_->GetDeviceType() == kCuda) {
for (std::size_t i = 0; i != streams_.size(); ++i) {
// create and record event on `stream_[i]`, and wait on c_->GetCudaStream
hipEvent_t event;
auto ret = hipEventCreate(&event);
K2_CHECK_CUDA_ERROR(ret);
ret = hipEventRecord(event, streams_[i]);
K2_CHECK_CUDA_ERROR(ret);
ret = hipStreamWaitEvent(c_->GetCudaStream(), event, 0);
K2_CHECK_CUDA_ERROR(ret);
ret = hipEventDestroy(event);
K2_CHECK_CUDA_ERROR(ret);
ret = hipStreamDestroy(streams_[i]);
K2_CHECK_CUDA_ERROR(ret);
}
// destroy event_
auto ret = hipEventDestroy(event_);
K2_CHECK_CUDA_ERROR(ret);
}
c_ = nullptr;
}
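// Illustrative usage sketch only (not part of the API surface): `c` is a
// ContextPtr obtained elsewhere, and `work_kernel`, `grid`, `block`, `args`
// are hypothetical placeholders.
//
//   ParallelRunner pr(c);
//   for (int32_t i = 0; i != num_tasks; ++i) {
//     hipStream_t s = pr.NewStream();  // each new stream first waits on c's stream
//     hipLaunchKernelGGL(work_kernel, grid, block, 0, s, args[i]);
//   }
//   pr.Finish();  // c's stream waits on all side streams before they are destroyed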
void GetBlockSizesForLambda2(int32_t m, int32_t n,
dim3 *block_dim,
dim3 *grid_dim,
Lambda2KernelType *kernel_type) {
// Note: 'n' is the 'inner-loop' one, the one which is supposed to vary the
// fastest.
int32_t n_block_size = (n <= 256 ? n : 256);
int32_t m_block_size = 1;
while (m_block_size * n_block_size < 256)
m_block_size *= 4; // limit for the product is 1024; we don't go beyond
// 512. (128 * 4 = 512).
*block_dim = dim3(n_block_size, m_block_size, 1);
int32_t n_grid_size = NumBlocks(n, n_block_size),
m_grid_size = NumBlocks(m, m_block_size);
if (n_grid_size < 65536 && m_grid_size < 65536) {
*grid_dim = dim3(n_grid_size, m_grid_size, 1);
*kernel_type = Lambda2KernelType::Simple;
} else if (n_grid_size < 65536) {
// only m is problematic.
*grid_dim = dim3(n_grid_size, 32768, NumBlocks(m_grid_size, 32768));
*kernel_type = Lambda2KernelType::UseZForM;
} else {
// we know n is problematic.
if (m_grid_size > 65536) {
K2_LOG(FATAL) << "Grid too large for Eval2(): m=" << m << ", n=" << n;
}
// only n is problematic.
*grid_dim = dim3(32768, m_grid_size, NumBlocks(n_grid_size, 32768));
*kernel_type = Lambda2KernelType::UseZForN;
}
}
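// Worked example (assuming NumBlocks() is ceiling division): for m = 100000,
// n = 1000 we get n_block_size = 256, m_block_size = 1, so n_grid_size = 4 and
// m_grid_size = 100000; since m_grid_size >= 65536 the UseZForM case is taken
// with grid_dim = (4, 32768, 4).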
} // namespace k2
| 4171a14ac4227b525d32a046ec9257ff7380ad91.cu | /**
* @brief
* context
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
* Xiaomi Corporation (author: Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include "k2/csrc/context.h"
#include "k2/csrc/eval.h"
namespace k2 {
RegionPtr NewRegion(ContextPtr &context, std::size_t num_bytes) {
// .. fairly straightforward. Sets bytes_used to num_bytes, caller can
// overwrite if needed.
auto ans = std::make_shared<Region>();
ans->context = context;
// TODO(haowen): deleter_context is always null with above constructor,
// we need add another constructor of Region to allow the caller
// to provide deleter_context.
ans->data = context->Allocate(num_bytes, &ans->deleter_context);
ans->num_bytes = num_bytes;
ans->bytes_used = num_bytes;
return ans;
}
ParallelRunner::ParallelRunner(ContextPtr c) : c_(c) {
if (c_->GetDeviceType() == kCuda) {
auto ret = cudaEventCreate(&event_);
K2_CHECK_CUDA_ERROR(ret);
    // Record an event on `c_->GetCudaStream()`; it will be waited on in `NewStream()`.
ret = cudaEventRecord(event_, c_->GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
}
}
cudaStream_t ParallelRunner::NewStream() {
DeviceType d = c_->GetDeviceType();
if (d == kCpu) {
return kCudaStreamInvalid;
} else {
K2_CHECK_EQ(d, kCuda);
cudaStream_t stream;
auto ret = cudaStreamCreate(&stream);
K2_CHECK_CUDA_ERROR(ret);
streams_.push_back(stream);
ret = cudaStreamWaitEvent(stream, event_, 0);
K2_CHECK_CUDA_ERROR(ret);
return stream;
}
}
void ParallelRunner::Finish() {
if (c_.get() == nullptr)
return;
if (c_->GetDeviceType() == kCuda) {
for (std::size_t i = 0; i != streams_.size(); ++i) {
// create and record event on `stream_[i]`, and wait on c_->GetCudaStream
cudaEvent_t event;
auto ret = cudaEventCreate(&event);
K2_CHECK_CUDA_ERROR(ret);
ret = cudaEventRecord(event, streams_[i]);
K2_CHECK_CUDA_ERROR(ret);
ret = cudaStreamWaitEvent(c_->GetCudaStream(), event, 0);
K2_CHECK_CUDA_ERROR(ret);
ret = cudaEventDestroy(event);
K2_CHECK_CUDA_ERROR(ret);
ret = cudaStreamDestroy(streams_[i]);
K2_CHECK_CUDA_ERROR(ret);
}
// destroy event_
auto ret = cudaEventDestroy(event_);
K2_CHECK_CUDA_ERROR(ret);
}
c_ = nullptr;
}
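// Illustrative usage sketch only (not part of the API surface): `c` is a
// ContextPtr obtained elsewhere, and `work_kernel`, `grid`, `block`, `args`
// are hypothetical placeholders.
//
//   ParallelRunner pr(c);
//   for (int32_t i = 0; i != num_tasks; ++i) {
//     cudaStream_t s = pr.NewStream();  // each new stream first waits on c's stream
//     work_kernel<<<grid, block, 0, s>>>(args[i]);
//   }
//   pr.Finish();  // c's stream waits on all side streams before they are destroyed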
void GetBlockSizesForLambda2(int32_t m, int32_t n,
dim3 *block_dim,
dim3 *grid_dim,
Lambda2KernelType *kernel_type) {
// Note: 'n' is the 'inner-loop' one, the one which is supposed to vary the
// fastest.
int32_t n_block_size = (n <= 256 ? n : 256);
int32_t m_block_size = 1;
while (m_block_size * n_block_size < 256)
m_block_size *= 4; // limit for the product is 1024; we don't go beyond
// 512. (128 * 4 = 512).
*block_dim = dim3(n_block_size, m_block_size, 1);
int32_t n_grid_size = NumBlocks(n, n_block_size),
m_grid_size = NumBlocks(m, m_block_size);
if (n_grid_size < 65536 && m_grid_size < 65536) {
*grid_dim = dim3(n_grid_size, m_grid_size, 1);
*kernel_type = Lambda2KernelType::Simple;
} else if (n_grid_size < 65536) {
// only m is problematic.
*grid_dim = dim3(n_grid_size, 32768, NumBlocks(m_grid_size, 32768));
*kernel_type = Lambda2KernelType::UseZForM;
} else {
// we know n is problematic.
if (m_grid_size > 65536) {
K2_LOG(FATAL) << "Grid too large for Eval2(): m=" << m << ", n=" << n;
}
// only n is problematic.
*grid_dim = dim3(32768, m_grid_size, NumBlocks(n_grid_size, 32768));
*kernel_type = Lambda2KernelType::UseZForN;
}
}
} // namespace k2
|
3b44d1fa7282274c11f30dc531d6878a674df5ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "im2col.h"
#include "hip/hip_runtime.h"
}
#if GPU
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
dim3(BLOCK), 0, 0,
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
#endif | 3b44d1fa7282274c11f30dc531d6878a674df5ab.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "im2col.h"
#include "cuda.h"
}
#if GPU
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK>>>(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
#endif |
3ce22dddb6dd264e43f093fce807e52bf34f7263.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::reduce_scatter(size_t recv_count,
const Tensors2<TypeEmbeddingComp> &send_tensors,
Tensors2<TypeEmbeddingComp> &recv_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
// need to know the type of TypeHashKey here
ncclDataType_t type;
switch (sizeof(TypeEmbeddingComp)) {
case 2:
type = ncclHalf;
break;
case 4:
type = ncclFloat;
break;
default:
CK_THROW_(Error_t::WrongInput, "Error: TypeHashKey not support by now");
}
// for multi GPUs, use NCCL to do Reduce-Scatter(supporting multi-node GPU servers)
if (total_gpu_count > 1) {
CK_NCCL_THROW_(ncclGroupStart());
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
CK_NCCL_THROW_(ncclReduceScatter(send_tensors[id].get_ptr(), // send buf
recv_tensors[id].get_ptr(), // recv buff
recv_count, type, ncclSum, local_gpu->get_nccl(),
local_gpu->get_stream()));
}
CK_NCCL_THROW_(ncclGroupEnd());
}
// for single GPU, just do memcpyD2D
else { // total_gpu_count == 1
const auto &local_gpu = resource_manager.get_local_gpu(0);
CudaDeviceContext context(local_gpu->get_device_id());
CK_CUDA_THROW_(hipMemcpyAsync(recv_tensors[0].get_ptr(), send_tensors[0].get_ptr(),
recv_count * sizeof(TypeEmbeddingComp), hipMemcpyDeviceToDevice,
local_gpu->get_stream()));
}
return;
}
template void SparseEmbeddingFunctors::reduce_scatter<float>(
size_t recv_count, const Tensors2<float> &send_tensors, Tensors2<float> &recv_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::reduce_scatter<__half>(
size_t recv_count, const Tensors2<__half> &send_tensors, Tensors2<__half> &recv_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR | 3ce22dddb6dd264e43f093fce807e52bf34f7263.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::reduce_scatter(size_t recv_count,
const Tensors2<TypeEmbeddingComp> &send_tensors,
Tensors2<TypeEmbeddingComp> &recv_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
// need to know the type of TypeHashKey here
ncclDataType_t type;
switch (sizeof(TypeEmbeddingComp)) {
case 2:
type = ncclHalf;
break;
case 4:
type = ncclFloat;
break;
default:
CK_THROW_(Error_t::WrongInput, "Error: TypeHashKey not support by now");
}
// for multi GPUs, use NCCL to do Reduce-Scatter(supporting multi-node GPU servers)
if (total_gpu_count > 1) {
CK_NCCL_THROW_(ncclGroupStart());
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
CK_NCCL_THROW_(ncclReduceScatter(send_tensors[id].get_ptr(), // send buf
recv_tensors[id].get_ptr(), // recv buff
recv_count, type, ncclSum, local_gpu->get_nccl(),
local_gpu->get_stream()));
}
CK_NCCL_THROW_(ncclGroupEnd());
}
// for single GPU, just do memcpyD2D
else { // total_gpu_count == 1
const auto &local_gpu = resource_manager.get_local_gpu(0);
CudaDeviceContext context(local_gpu->get_device_id());
CK_CUDA_THROW_(cudaMemcpyAsync(recv_tensors[0].get_ptr(), send_tensors[0].get_ptr(),
recv_count * sizeof(TypeEmbeddingComp), cudaMemcpyDeviceToDevice,
local_gpu->get_stream()));
}
return;
}
template void SparseEmbeddingFunctors::reduce_scatter<float>(
size_t recv_count, const Tensors2<float> &send_tensors, Tensors2<float> &recv_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::reduce_scatter<__half>(
size_t recv_count, const Tensors2<__half> &send_tensors, Tensors2<__half> &recv_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR |
7bcee0ef383a1f12f325f28104a4da49fa19cd54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tfcc_cudaconvolutioninterface.h"
#include "exceptions/tfcc_cudaruntimeerror.h"
#include "exceptions/tfcc_cudnnruntimeerror.h"
#include "exceptions/tfcc_invalidargumenterror.h"
#include "exceptions/tfcc_notimplementederror.h"
#include "framework/tfcc_cudadevice.h"
#include "framework/tfcc_cudasession.h"
#include "framework/tfcc_cudatypes.h"
#include "framework/tfcc_session.h"
#include "framework/tfcc_types.h"
#include "utils/tfcc_cudnnutils.h"
namespace tfcc {
/**
* [s1, s2, s3] => [s1, s3, s2]
*/
template <class T>
static __global__ void _cuda_convolution_transpose(const T* a, unsigned s1, unsigned s2, unsigned s3, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
const unsigned total = s1 * s2 * s3;
for (unsigned i = tid; i < total; i += skip) {
unsigned ns1 = (i / (s2 * s3)) % s1;
unsigned ns2 = (i / s3) % s2;
unsigned ns3 = i % s3;
unsigned pos = ns1 * s2 * s3 + ns3 * s2 + ns2;
b[pos] = a[i];
}
}
template <class T>
CUDAConvolutionInterface<T>::CUDAConvolutionInterface(const CUDADeviceProperty& property)
: _property(property) {
}
template <class T>
CUDAConvolutionInterface<T>::~CUDAConvolutionInterface() {
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2d(
const Tensor<T>& input, bool nhwc,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth,
unsigned dilateHeight, unsigned dilateWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = kernel.shape(0);
unsigned inChannels = kernel.shape(1);
unsigned inHeight = input.shape(nhwc ? 1 : 2);
unsigned inWidth = input.shape(nhwc ? 2 : 3);
unsigned kernelHeight = kernel.shape(2);
unsigned kernelWidth = kernel.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard inputGuard(&inputDescriptor);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// output tensor
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard outputGuard(&outputDescriptor);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// kernel tensor
cudnnFilterDescriptor_t kernelDescriptor;
ret = cudnnCreateFilterDescriptor(&kernelDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnFilterDescriptorGuard kernelGuard(&kernelDescriptor);
ret = cudnnSetFilter4dDescriptor(
kernelDescriptor,
dataType,
CUDNN_TENSOR_NCHW,
outChannels,
inChannels,
kernelHeight,
kernelWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv descriptor
cudnnConvolutionDescriptor_t convolutionDescriptor;
ret = cudnnCreateConvolutionDescriptor(&convolutionDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnConvolutionDescriptorGuard convGuard(&convolutionDescriptor);
tfcc::CUDADevice* device = static_cast<tfcc::CUDADevice*>(tfcc::Device::getThreadDefault());
#ifdef TFCC_USE_TENSOR_CORE
if (device->isTensorCoreEnabled()) {
ret = cudnnSetConvolutionMathType(convolutionDescriptor, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION); //CUDNN_TENSOR_OP_MATH);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
#endif
ret = cudnnSetConvolution2dDescriptor(
convolutionDescriptor,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth,
dilateHeight,
dilateWidth,
CUDNN_CROSS_CORRELATION,
dataType);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv algorithm
cudnnConvolutionFwdAlgo_t convolutionAlgorithm;
if (device->isTensorCoreEnabled()) {
convolutionAlgorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
ret = cudnnGetConvolutionForwardAlgorithm(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
CUDNN_CONVOLUTION_FWD_NO_WORKSPACE,
0,
&convolutionAlgorithm);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
// alloc workspace memory
size_t workspaceBytes = 0;
ret = cudnnGetConvolutionForwardWorkspaceSize(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
convolutionAlgorithm,
&workspaceBytes);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
unsigned tmpSize = static_cast<unsigned>((workspaceBytes + sizeof(T) - 1) / sizeof(T));
tmpSize = tmpSize == 0 ? 1 : tmpSize;
Variable<T> tmp({
tmpSize,
});
// run
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnConvolutionForward(
session->getImpl()->cudnnHandle(),
&alpha,
inputDescriptor,
input.data(),
kernelDescriptor,
kernel.data(),
convolutionDescriptor,
convolutionAlgorithm,
tmp.data(),
workspaceBytes,
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2d(
const Tensor<T>& input, bool nhwc,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = kernel.shape(0);
unsigned inChannels = kernel.shape(1);
unsigned inHeight = input.shape(nhwc ? 1 : 2);
unsigned inWidth = input.shape(nhwc ? 2 : 3);
unsigned kernelHeight = kernel.shape(2);
unsigned kernelWidth = kernel.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard inputGuard(&inputDescriptor);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// output tensor
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard outputGuard(&outputDescriptor);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// kernel tensor
cudnnFilterDescriptor_t kernelDescriptor;
ret = cudnnCreateFilterDescriptor(&kernelDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnFilterDescriptorGuard kernelGuard(&kernelDescriptor);
ret = cudnnSetFilter4dDescriptor(
kernelDescriptor,
dataType,
CUDNN_TENSOR_NCHW,
outChannels,
inChannels,
kernelHeight,
kernelWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv descriptor
cudnnConvolutionDescriptor_t convolutionDescriptor;
ret = cudnnCreateConvolutionDescriptor(&convolutionDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnConvolutionDescriptorGuard convGuard(&convolutionDescriptor);
tfcc::CUDADevice* device = static_cast<tfcc::CUDADevice*>(tfcc::Device::getThreadDefault());
#ifdef TFCC_USE_TENSOR_CORE
if (device->isTensorCoreEnabled()) {
ret = cudnnSetConvolutionMathType(convolutionDescriptor, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION); //CUDNN_TENSOR_OP_MATH);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
#endif
ret = cudnnSetConvolution2dDescriptor(
convolutionDescriptor,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth,
1,
1,
CUDNN_CROSS_CORRELATION,
dataType);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv algorithm
cudnnConvolutionFwdAlgo_t convolutionAlgorithm;
if (device->isTensorCoreEnabled()) {
convolutionAlgorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
ret = cudnnGetConvolutionForwardAlgorithm(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
CUDNN_CONVOLUTION_FWD_NO_WORKSPACE,
0,
&convolutionAlgorithm);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
// alloc workspace memory
size_t workspaceBytes = 0;
ret = cudnnGetConvolutionForwardWorkspaceSize(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
convolutionAlgorithm,
&workspaceBytes);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
unsigned tmpSize = static_cast<unsigned>((workspaceBytes + sizeof(T) - 1) / sizeof(T));
tmpSize = tmpSize == 0 ? 1 : tmpSize;
Variable<T> tmp({
tmpSize,
});
// run
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnConvolutionForward(
session->getImpl()->cudnnHandle(),
&alpha,
inputDescriptor,
input.data(),
kernelDescriptor,
kernel.data(),
convolutionDescriptor,
convolutionAlgorithm,
tmp.data(),
workspaceBytes,
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2dBackwardData(
const Tensor<T>& input, bool nhwc,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
if (!nhwc) {
return conv2dBackwardDataNCHW(input, kernel, paddingHeight, paddingWidth, strideHeight, strideWidth);
}
Variable<T> realInput = nhwc2nchw(input);
Variable<T> output = conv2dBackwardDataNCHW(realInput, kernel, paddingHeight, paddingWidth, strideHeight, strideWidth);
return nchw2nhwc(output);
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::maxPool2d(
const Tensor<T>& input, bool nhwc,
unsigned kernelHeight, unsigned kernelWidth,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = nhwc ? input.shape(3) : input.shape(1);
unsigned inChannels = outChannels;
unsigned inHeight = nhwc ? input.shape(1) : input.shape(2);
unsigned inWidth = nhwc ? input.shape(2) : input.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnPoolingDescriptor_t poolingDescriptor;
ret = cudnnCreatePoolingDescriptor(&poolingDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetPooling2dDescriptor(
poolingDescriptor,
CUDNN_POOLING_MAX,
CUDNN_NOT_PROPAGATE_NAN,
kernelHeight,
kernelWidth,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth);
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnPoolingForward(
session->getImpl()->cudnnHandle(),
poolingDescriptor,
&alpha,
inputDescriptor,
input.data(),
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnDestroyTensorDescriptor(inputDescriptor);
cudnnDestroyTensorDescriptor(outputDescriptor);
cudnnDestroyPoolingDescriptor(poolingDescriptor);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::avgPool2d(
const Tensor<T>& input, bool nhwc,
unsigned kernelHeight, unsigned kernelWidth,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = nhwc ? input.shape(3) : input.shape(1);
unsigned inChannels = outChannels;
unsigned inHeight = nhwc ? input.shape(1) : input.shape(2);
unsigned inWidth = nhwc ? input.shape(2) : input.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnPoolingDescriptor_t poolingDescriptor;
ret = cudnnCreatePoolingDescriptor(&poolingDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetPooling2dDescriptor(
poolingDescriptor,
CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING,
CUDNN_NOT_PROPAGATE_NAN,
kernelHeight,
kernelWidth,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth);
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnPoolingForward(
session->getImpl()->cudnnHandle(),
poolingDescriptor,
&alpha,
inputDescriptor,
input.data(),
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnDestroyTensorDescriptor(inputDescriptor);
cudnnDestroyTensorDescriptor(outputDescriptor);
cudnnDestroyPoolingDescriptor(poolingDescriptor);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2dBackwardDataNCHW(
const Tensor<T>& input,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned inHeight = input.shape(2);
unsigned inWidth = input.shape(3);
unsigned kernelHeight = kernel.shape(2);
unsigned kernelWidth = kernel.shape(3);
unsigned outHeight = (inHeight - 1) * strideHeight + kernelHeight - 2 * paddingHeight;
unsigned outWidth = (inWidth - 1) * strideWidth + kernelWidth - 2 * paddingWidth;
unsigned inChannels = kernel.shape(0);
unsigned outChannels = kernel.shape(1);
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output({batch, outChannels, outHeight, outWidth});
cudnnTensorDescriptor_t outputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnTensorDescriptor_t inputDescriptor;
ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnFilterDescriptor_t kernelDescriptor;
ret = cudnnCreateFilterDescriptor(&kernelDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetFilter4dDescriptor(
kernelDescriptor,
dataType,
CUDNN_TENSOR_NCHW,
inChannels,
outChannels,
kernelHeight,
kernelWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnConvolutionDescriptor_t convolutionDescriptor;
ret = cudnnCreateConvolutionDescriptor(&convolutionDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetConvolution2dDescriptor(
convolutionDescriptor,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth,
1,
1,
CUDNN_CROSS_CORRELATION,
dataType);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnConvolutionBwdDataAlgo_t convolutionAlgorithm;
ret = cudnnGetConvolutionBackwardDataAlgorithm(
session->getImpl()->cudnnHandle(),
kernelDescriptor,
inputDescriptor,
convolutionDescriptor,
outputDescriptor,
CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE,
0,
&convolutionAlgorithm);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
size_t workspaceBytes = 0;
ret = cudnnGetConvolutionBackwardDataWorkspaceSize(
session->getImpl()->cudnnHandle(),
kernelDescriptor,
inputDescriptor,
convolutionDescriptor,
outputDescriptor,
convolutionAlgorithm,
&workspaceBytes);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
unsigned tmpSize = static_cast<unsigned>((workspaceBytes + sizeof(T) - 1) / sizeof(T));
tmpSize = tmpSize == 0 ? 1 : tmpSize;
Variable<T> tmp({
tmpSize,
});
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnConvolutionBackwardData(
session->getImpl()->cudnnHandle(),
&alpha,
kernelDescriptor,
kernel.data(),
inputDescriptor,
input.data(),
convolutionDescriptor,
convolutionAlgorithm,
tmp.data(),
workspaceBytes,
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnDestroyTensorDescriptor(inputDescriptor);
cudnnDestroyTensorDescriptor(outputDescriptor);
cudnnDestroyFilterDescriptor(kernelDescriptor);
cudnnDestroyConvolutionDescriptor(convolutionDescriptor);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::nhwc2nchw(const Tensor<T>& a) {
Variable<T> result({a.shape(0), a.shape(3), a.shape(1), a.shape(2)});
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(result.size());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_convolution_transpose), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(),
a.shape(0), a.shape(1) * a.shape(2), a.shape(3),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::nchw2nhwc(const Tensor<T>& a) {
Variable<T> result({a.shape(0), a.shape(2), a.shape(3), a.shape(1)});
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(result.size());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_convolution_transpose), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(),
a.shape(0), a.shape(1), a.shape(2) * a.shape(3),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
#define DEFINE_FUNC(type) template class CUDAConvolutionInterface<type>;
TFCC_FOR_ALL_TYPES(DEFINE_FUNC);
} // namespace tfcc
| 7bcee0ef383a1f12f325f28104a4da49fa19cd54.cu |
#include "tfcc_cudaconvolutioninterface.h"
#include "exceptions/tfcc_cudaruntimeerror.h"
#include "exceptions/tfcc_cudnnruntimeerror.h"
#include "exceptions/tfcc_invalidargumenterror.h"
#include "exceptions/tfcc_notimplementederror.h"
#include "framework/tfcc_cudadevice.h"
#include "framework/tfcc_cudasession.h"
#include "framework/tfcc_cudatypes.h"
#include "framework/tfcc_session.h"
#include "framework/tfcc_types.h"
#include "utils/tfcc_cudnnutils.h"
namespace tfcc {
/**
* [s1, s2, s3] => [s1, s3, s2]
*/
template <class T>
static __global__ void _cuda_convolution_transpose(const T* a, unsigned s1, unsigned s2, unsigned s3, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
const unsigned total = s1 * s2 * s3;
for (unsigned i = tid; i < total; i += skip) {
unsigned ns1 = (i / (s2 * s3)) % s1;
unsigned ns2 = (i / s3) % s2;
unsigned ns3 = i % s3;
unsigned pos = ns1 * s2 * s3 + ns3 * s2 + ns2;
b[pos] = a[i];
}
}
template <class T>
CUDAConvolutionInterface<T>::CUDAConvolutionInterface(const CUDADeviceProperty& property)
: _property(property) {
}
template <class T>
CUDAConvolutionInterface<T>::~CUDAConvolutionInterface() {
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2d(
const Tensor<T>& input, bool nhwc,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth,
unsigned dilateHeight, unsigned dilateWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = kernel.shape(0);
unsigned inChannels = kernel.shape(1);
unsigned inHeight = input.shape(nhwc ? 1 : 2);
unsigned inWidth = input.shape(nhwc ? 2 : 3);
unsigned kernelHeight = kernel.shape(2);
unsigned kernelWidth = kernel.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard inputGuard(&inputDescriptor);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// output tensor
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard outputGuard(&outputDescriptor);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// kernel tensor
cudnnFilterDescriptor_t kernelDescriptor;
ret = cudnnCreateFilterDescriptor(&kernelDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnFilterDescriptorGuard kernelGuard(&kernelDescriptor);
ret = cudnnSetFilter4dDescriptor(
kernelDescriptor,
dataType,
CUDNN_TENSOR_NCHW,
outChannels,
inChannels,
kernelHeight,
kernelWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv descriptor
cudnnConvolutionDescriptor_t convolutionDescriptor;
ret = cudnnCreateConvolutionDescriptor(&convolutionDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnConvolutionDescriptorGuard convGuard(&convolutionDescriptor);
tfcc::CUDADevice* device = static_cast<tfcc::CUDADevice*>(tfcc::Device::getThreadDefault());
#ifdef TFCC_USE_TENSOR_CORE
if (device->isTensorCoreEnabled()) {
ret = cudnnSetConvolutionMathType(convolutionDescriptor, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION); //CUDNN_TENSOR_OP_MATH);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
#endif
ret = cudnnSetConvolution2dDescriptor(
convolutionDescriptor,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth,
dilateHeight,
dilateWidth,
CUDNN_CROSS_CORRELATION,
dataType);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv algorithm
cudnnConvolutionFwdAlgo_t convolutionAlgorithm;
if (device->isTensorCoreEnabled()) {
convolutionAlgorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
ret = cudnnGetConvolutionForwardAlgorithm(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
CUDNN_CONVOLUTION_FWD_NO_WORKSPACE,
0,
&convolutionAlgorithm);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
// alloc workspace memory
size_t workspaceBytes = 0;
ret = cudnnGetConvolutionForwardWorkspaceSize(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
convolutionAlgorithm,
&workspaceBytes);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
unsigned tmpSize = static_cast<unsigned>((workspaceBytes + sizeof(T) - 1) / sizeof(T));
tmpSize = tmpSize == 0 ? 1 : tmpSize;
Variable<T> tmp({
tmpSize,
});
// run
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnConvolutionForward(
session->getImpl()->cudnnHandle(),
&alpha,
inputDescriptor,
input.data(),
kernelDescriptor,
kernel.data(),
convolutionDescriptor,
convolutionAlgorithm,
tmp.data(),
workspaceBytes,
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2d(
const Tensor<T>& input, bool nhwc,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = kernel.shape(0);
unsigned inChannels = kernel.shape(1);
unsigned inHeight = input.shape(nhwc ? 1 : 2);
unsigned inWidth = input.shape(nhwc ? 2 : 3);
unsigned kernelHeight = kernel.shape(2);
unsigned kernelWidth = kernel.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard inputGuard(&inputDescriptor);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// output tensor
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard outputGuard(&outputDescriptor);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// kernel tensor
cudnnFilterDescriptor_t kernelDescriptor;
ret = cudnnCreateFilterDescriptor(&kernelDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnFilterDescriptorGuard kernelGuard(&kernelDescriptor);
ret = cudnnSetFilter4dDescriptor(
kernelDescriptor,
dataType,
CUDNN_TENSOR_NCHW,
outChannels,
inChannels,
kernelHeight,
kernelWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv descriptor
cudnnConvolutionDescriptor_t convolutionDescriptor;
ret = cudnnCreateConvolutionDescriptor(&convolutionDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnConvolutionDescriptorGuard convGuard(&convolutionDescriptor);
tfcc::CUDADevice* device = static_cast<tfcc::CUDADevice*>(tfcc::Device::getThreadDefault());
#ifdef TFCC_USE_TENSOR_CORE
if (device->isTensorCoreEnabled()) {
ret = cudnnSetConvolutionMathType(convolutionDescriptor, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION); //CUDNN_TENSOR_OP_MATH);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
#endif
ret = cudnnSetConvolution2dDescriptor(
convolutionDescriptor,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth,
1,
1,
CUDNN_CROSS_CORRELATION,
dataType);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
// conv algorithm
cudnnConvolutionFwdAlgo_t convolutionAlgorithm;
if (device->isTensorCoreEnabled()) {
convolutionAlgorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
} else {
ret = cudnnGetConvolutionForwardAlgorithm(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
CUDNN_CONVOLUTION_FWD_NO_WORKSPACE,
0,
&convolutionAlgorithm);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
// alloc workspace memory
size_t workspaceBytes = 0;
ret = cudnnGetConvolutionForwardWorkspaceSize(
session->getImpl()->cudnnHandle(),
inputDescriptor,
kernelDescriptor,
convolutionDescriptor,
outputDescriptor,
convolutionAlgorithm,
&workspaceBytes);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
unsigned tmpSize = static_cast<unsigned>((workspaceBytes + sizeof(T) - 1) / sizeof(T));
tmpSize = tmpSize == 0 ? 1 : tmpSize;
Variable<T> tmp({
tmpSize,
});
// run
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnConvolutionForward(
session->getImpl()->cudnnHandle(),
&alpha,
inputDescriptor,
input.data(),
kernelDescriptor,
kernel.data(),
convolutionDescriptor,
convolutionAlgorithm,
tmp.data(),
workspaceBytes,
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2dBackwardData(
const Tensor<T>& input, bool nhwc,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
if (!nhwc) {
return conv2dBackwardDataNCHW(input, kernel, paddingHeight, paddingWidth, strideHeight, strideWidth);
}
Variable<T> realInput = nhwc2nchw(input);
Variable<T> output = conv2dBackwardDataNCHW(realInput, kernel, paddingHeight, paddingWidth, strideHeight, strideWidth);
return nchw2nhwc(output);
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::maxPool2d(
const Tensor<T>& input, bool nhwc,
unsigned kernelHeight, unsigned kernelWidth,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = nhwc ? input.shape(3) : input.shape(1);
unsigned inChannels = outChannels;
unsigned inHeight = nhwc ? input.shape(1) : input.shape(2);
unsigned inWidth = nhwc ? input.shape(2) : input.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnPoolingDescriptor_t poolingDescriptor;
ret = cudnnCreatePoolingDescriptor(&poolingDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetPooling2dDescriptor(
poolingDescriptor,
CUDNN_POOLING_MAX,
CUDNN_NOT_PROPAGATE_NAN,
kernelHeight,
kernelWidth,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth);
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnPoolingForward(
session->getImpl()->cudnnHandle(),
poolingDescriptor,
&alpha,
inputDescriptor,
input.data(),
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnDestroyTensorDescriptor(inputDescriptor);
cudnnDestroyTensorDescriptor(outputDescriptor);
cudnnDestroyPoolingDescriptor(poolingDescriptor);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::avgPool2d(
const Tensor<T>& input, bool nhwc,
unsigned kernelHeight, unsigned kernelWidth,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned outChannels = nhwc ? input.shape(3) : input.shape(1);
unsigned inChannels = outChannels;
unsigned inHeight = nhwc ? input.shape(1) : input.shape(2);
unsigned inWidth = nhwc ? input.shape(2) : input.shape(3);
unsigned outHeight = (inHeight - kernelHeight + 2 * paddingHeight) / strideHeight + 1;
unsigned outWidth = (inWidth - kernelWidth + 2 * paddingWidth) / strideWidth + 1;
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output(nhwc ? Shape({batch, outHeight, outWidth, outChannels}) : Shape({batch, outChannels, outHeight, outWidth}));
cudnnTensorDescriptor_t inputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnTensorDescriptor_t outputDescriptor;
ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
nhwc ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnPoolingDescriptor_t poolingDescriptor;
ret = cudnnCreatePoolingDescriptor(&poolingDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetPooling2dDescriptor(
poolingDescriptor,
CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING,
CUDNN_NOT_PROPAGATE_NAN,
kernelHeight,
kernelWidth,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth);
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnPoolingForward(
session->getImpl()->cudnnHandle(),
poolingDescriptor,
&alpha,
inputDescriptor,
input.data(),
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnDestroyTensorDescriptor(inputDescriptor);
cudnnDestroyTensorDescriptor(outputDescriptor);
cudnnDestroyPoolingDescriptor(poolingDescriptor);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::conv2dBackwardDataNCHW(
const Tensor<T>& input,
const Tensor<T>& kernel,
unsigned paddingHeight, unsigned paddingWidth,
unsigned strideHeight, unsigned strideWidth) {
unsigned batch = input.shape(0);
unsigned inHeight = input.shape(2);
unsigned inWidth = input.shape(3);
unsigned kernelHeight = kernel.shape(2);
unsigned kernelWidth = kernel.shape(3);
unsigned outHeight = (inHeight - 1) * strideHeight + kernelHeight - 2 * paddingHeight;
unsigned outWidth = (inWidth - 1) * strideWidth + kernelWidth - 2 * paddingWidth;
unsigned inChannels = kernel.shape(0);
unsigned outChannels = kernel.shape(1);
cudnnDataType_t dataType = CUDATypeTraits<T>::getCUDNNType();
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
Variable<T> output({batch, outChannels, outHeight, outWidth});
cudnnTensorDescriptor_t outputDescriptor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&outputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
outputDescriptor,
CUDNN_TENSOR_NCHW,
dataType,
batch,
outChannels,
outHeight,
outWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnTensorDescriptor_t inputDescriptor;
ret = cudnnCreateTensorDescriptor(&inputDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetTensor4dDescriptor(
inputDescriptor,
CUDNN_TENSOR_NCHW,
dataType,
batch,
inChannels,
inHeight,
inWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnFilterDescriptor_t kernelDescriptor;
ret = cudnnCreateFilterDescriptor(&kernelDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetFilter4dDescriptor(
kernelDescriptor,
dataType,
CUDNN_TENSOR_NCHW,
inChannels,
outChannels,
kernelHeight,
kernelWidth);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnConvolutionDescriptor_t convolutionDescriptor;
ret = cudnnCreateConvolutionDescriptor(&convolutionDescriptor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
ret = cudnnSetConvolution2dDescriptor(
convolutionDescriptor,
paddingHeight,
paddingWidth,
strideHeight,
strideWidth,
1,
1,
CUDNN_CROSS_CORRELATION,
dataType);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnConvolutionBwdDataAlgo_t convolutionAlgorithm;
ret = cudnnGetConvolutionBackwardDataAlgorithm(
session->getImpl()->cudnnHandle(),
kernelDescriptor,
inputDescriptor,
convolutionDescriptor,
outputDescriptor,
CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE,
0,
&convolutionAlgorithm);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
size_t workspaceBytes = 0;
ret = cudnnGetConvolutionBackwardDataWorkspaceSize(
session->getImpl()->cudnnHandle(),
kernelDescriptor,
inputDescriptor,
convolutionDescriptor,
outputDescriptor,
convolutionAlgorithm,
&workspaceBytes);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
unsigned tmpSize = static_cast<unsigned>((workspaceBytes + sizeof(T) - 1) / sizeof(T));
tmpSize = tmpSize == 0 ? 1 : tmpSize;
Variable<T> tmp({
tmpSize,
});
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
ret = cudnnConvolutionBackwardData(
session->getImpl()->cudnnHandle(),
&alpha,
kernelDescriptor,
kernel.data(),
inputDescriptor,
input.data(),
convolutionDescriptor,
convolutionAlgorithm,
tmp.data(),
workspaceBytes,
&beta,
outputDescriptor,
output.data());
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
cudnnDestroyTensorDescriptor(inputDescriptor);
cudnnDestroyTensorDescriptor(outputDescriptor);
cudnnDestroyFilterDescriptor(kernelDescriptor);
cudnnDestroyConvolutionDescriptor(convolutionDescriptor);
return output;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::nhwc2nchw(const Tensor<T>& a) {
Variable<T> result({a.shape(0), a.shape(3), a.shape(1), a.shape(2)});
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(result.size());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_convolution_transpose<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(),
a.shape(0), a.shape(1) * a.shape(2), a.shape(3),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T>
Variable<T> CUDAConvolutionInterface<T>::nchw2nhwc(const Tensor<T>& a) {
Variable<T> result({a.shape(0), a.shape(2), a.shape(3), a.shape(1)});
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(result.size());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_convolution_transpose<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(),
a.shape(0), a.shape(1), a.shape(2) * a.shape(3),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
#define DEFINE_FUNC(type) template class CUDAConvolutionInterface<type>;
TFCC_FOR_ALL_TYPES(DEFINE_FUNC);
} // namespace tfcc
|
0507471bf57b3d1ff19f7550a6e333bc983f2b1e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <device_launch_parameters.h>
__global__ void kernelTest() {
__shared__ int shrd[50];
//shrd = hipMalloc((void **) &shrd, 50*sizeof(int));
//int blockX, blockY, blockZ;
//int threadX, threadY, threadZ;
shrd[0] = blockIdx.x;
shrd[1] = blockIdx.y;
shrd[2] = blockIdx.z;
shrd[3] = threadIdx.x;
shrd[4] = threadIdx.y;
shrd[6] = threadIdx.z;
// printf("Block x: %d - y: %d - z: %d\n", blockX, blockY, blockZ);
// printf("Thread x: %d - y: %d - z: %d\n", threadX, threadY, threadZ);
// __syncthreads();
return ;
}
void test(){
dim3 blockSize(3, 3, 3);
dim3 kernelSize(3, 3, 3);
hipLaunchKernelGGL(( kernelTest), dim3(kernelSize), dim3(blockSize), 0, 0, );
hipDeviceSynchronize();
} | 0507471bf57b3d1ff19f7550a6e333bc983f2b1e.cu | #include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>
#include <device_launch_parameters.h>
__global__ void kernelTest() {
__shared__ int shrd[50];
//shrd = cudaMalloc((void **) &shrd, 50*sizeof(int));
//int blockX, blockY, blockZ;
//int threadX, threadY, threadZ;
shrd[0] = blockIdx.x;
shrd[1] = blockIdx.y;
shrd[2] = blockIdx.z;
shrd[3] = threadIdx.x;
shrd[4] = threadIdx.y;
shrd[6] = threadIdx.z;
// printf("Block x: %d - y: %d - z: %d\n", blockX, blockY, blockZ);
// printf("Thread x: %d - y: %d - z: %d\n", threadX, threadY, threadZ);
// __syncthreads();
return ;
}
void test(){
dim3 blockSize(3, 3, 3);
dim3 kernelSize(3, 3, 3);
kernelTest<<<kernelSize, blockSize>>>();
cudaDeviceSynchronize();
} |
c7ff2568976db84890c8aa71bfd506e7abbb646a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/ml_benchmark.hpp>
#include <cuml/matrix/kernelparams.h>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelfactory.cuh>
#include <memory>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/random/rng.hpp>
#include <sstream>
#include <string>
#include <vector>
namespace MLCommon {
namespace Bench {
namespace Matrix {
using namespace MLCommon::Matrix;
struct GramTestParams {
int m; // m parameter of the GEMM
int k; // k parameter of the GEMM
int n; // n parameter of the GEMM
KernelParams kernel_params;
bool is_row_major;
}; // struct GramTestParams
template <typename T>
struct GramMatrix : public Fixture {
GramMatrix(const std::string& name, const GramTestParams& p)
: Fixture(name), params(p), A(0, stream), B(0, stream), C(0, stream)
{
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[p.kernel_params.kernel] << "/" << p.m << "x" << p.k << "x"
<< p.n << "/" << (p.is_row_major ? "row_major" : "col_major");
this->SetName(oss.str().c_str());
RAFT_CUBLAS_TRY(hipblasCreate(&cublas_handle));
kernel =
std::unique_ptr<GramMatrixBase<T>>(KernelFactory<T>::create(p.kernel_params, cublas_handle));
}
~GramMatrix() { RAFT_CUBLAS_TRY_NO_THROW(hipblasDestroy(cublas_handle)); }
protected:
void allocateBuffers(const ::benchmark::State& state) override
{
A.resize(params.m * params.k, stream);
B.resize(params.k * params.n, stream);
C.resize(params.m * params.n, stream);
raft::random::Rng r(123456ULL);
r.uniform(A.data(), params.m * params.k, T(-1.0), T(1.0), stream);
r.uniform(B.data(), params.k * params.n, T(-1.0), T(1.0), stream);
}
void deallocateBuffers(const ::benchmark::State& state) override
{
A.release();
B.release();
C.release();
}
void runBenchmark(::benchmark::State& state) override
{
if (!this->kernel) { state.SkipWithError("Kernel matrix is not initialized"); }
loopOnState(state, [this]() {
(*this->kernel)(A.data(),
this->params.m,
this->params.k,
B.data(),
this->params.n,
C.data(),
this->params.is_row_major,
this->stream);
});
}
private:
hipblasHandle_t cublas_handle;
std::unique_ptr<GramMatrixBase<T>> kernel;
GramTestParams params;
rmm::device_uvector<T> A; // input matrix A, size [m * k]
rmm::device_uvector<T> B; // input matrix B, size [n * k]
rmm::device_uvector<T> C; // output matrix C, size [m*n]
};
static std::vector<GramTestParams> getInputs()
{
std::vector<GramTestParams> param_vec;
std::vector<KernelParams> kernel_params{KernelParams{LINEAR, 3, 1, 0},
KernelParams{POLYNOMIAL, 2, 1.3, 1},
KernelParams{TANH, 2, 0.5, 2.4},
KernelParams{RBF, 2, 0.5, 0}};
struct TestSize {
int m;
int k;
int n;
};
std::vector<TestSize> data_size{{4096, 10, 1024},
{4096, 100, 1024},
{4096, 1000, 1024},
{4096, 10000, 1024},
{100000, 10, 1024},
{100000, 100, 1024},
{100000, 1000, 1024}};
param_vec.reserve(kernel_params.size() * data_size.size());
for (TestSize s : data_size) {
for (auto kernel : kernel_params) {
for (bool row_major : {false, true}) {
param_vec.push_back(GramTestParams{s.m, s.k, s.n, kernel, row_major});
}
}
}
return param_vec;
}
ML_BENCH_REGISTER(GramTestParams, GramMatrix<float>, "", getInputs());
ML_BENCH_REGISTER(GramTestParams, GramMatrix<double>, "", getInputs());
} // namespace Matrix
} // namespace Bench
} // namespace MLCommon
| c7ff2568976db84890c8aa71bfd506e7abbb646a.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/ml_benchmark.hpp>
#include <cuml/matrix/kernelparams.h>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelfactory.cuh>
#include <memory>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/random/rng.hpp>
#include <sstream>
#include <string>
#include <vector>
namespace MLCommon {
namespace Bench {
namespace Matrix {
using namespace MLCommon::Matrix;
struct GramTestParams {
int m; // m parameter of the GEMM
int k; // k parameter of the GEMM
int n; // n parameter of the GEMM
KernelParams kernel_params;
bool is_row_major;
}; // struct GramTestParams
template <typename T>
struct GramMatrix : public Fixture {
GramMatrix(const std::string& name, const GramTestParams& p)
: Fixture(name), params(p), A(0, stream), B(0, stream), C(0, stream)
{
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[p.kernel_params.kernel] << "/" << p.m << "x" << p.k << "x"
<< p.n << "/" << (p.is_row_major ? "row_major" : "col_major");
this->SetName(oss.str().c_str());
RAFT_CUBLAS_TRY(cublasCreate(&cublas_handle));
kernel =
std::unique_ptr<GramMatrixBase<T>>(KernelFactory<T>::create(p.kernel_params, cublas_handle));
}
~GramMatrix() { RAFT_CUBLAS_TRY_NO_THROW(cublasDestroy(cublas_handle)); }
protected:
void allocateBuffers(const ::benchmark::State& state) override
{
A.resize(params.m * params.k, stream);
B.resize(params.k * params.n, stream);
C.resize(params.m * params.n, stream);
raft::random::Rng r(123456ULL);
r.uniform(A.data(), params.m * params.k, T(-1.0), T(1.0), stream);
r.uniform(B.data(), params.k * params.n, T(-1.0), T(1.0), stream);
}
void deallocateBuffers(const ::benchmark::State& state) override
{
A.release();
B.release();
C.release();
}
void runBenchmark(::benchmark::State& state) override
{
if (!this->kernel) { state.SkipWithError("Kernel matrix is not initialized"); }
loopOnState(state, [this]() {
(*this->kernel)(A.data(),
this->params.m,
this->params.k,
B.data(),
this->params.n,
C.data(),
this->params.is_row_major,
this->stream);
});
}
private:
cublasHandle_t cublas_handle;
std::unique_ptr<GramMatrixBase<T>> kernel;
GramTestParams params;
rmm::device_uvector<T> A; // input matrix A, size [m * k]
rmm::device_uvector<T> B; // input matrix B, size [n * k]
rmm::device_uvector<T> C; // output matrix C, size [m*n]
};
static std::vector<GramTestParams> getInputs()
{
std::vector<GramTestParams> param_vec;
std::vector<KernelParams> kernel_params{KernelParams{LINEAR, 3, 1, 0},
KernelParams{POLYNOMIAL, 2, 1.3, 1},
KernelParams{TANH, 2, 0.5, 2.4},
KernelParams{RBF, 2, 0.5, 0}};
struct TestSize {
int m;
int k;
int n;
};
std::vector<TestSize> data_size{{4096, 10, 1024},
{4096, 100, 1024},
{4096, 1000, 1024},
{4096, 10000, 1024},
{100000, 10, 1024},
{100000, 100, 1024},
{100000, 1000, 1024}};
param_vec.reserve(kernel_params.size() * data_size.size());
for (TestSize s : data_size) {
for (auto kernel : kernel_params) {
for (bool row_major : {false, true}) {
param_vec.push_back(GramTestParams{s.m, s.k, s.n, kernel, row_major});
}
}
}
return param_vec;
}
ML_BENCH_REGISTER(GramTestParams, GramMatrix<float>, "", getInputs());
ML_BENCH_REGISTER(GramTestParams, GramMatrix<double>, "", getInputs());
} // namespace Matrix
} // namespace Bench
} // namespace MLCommon
|
0ff4704c4260c0d2b926a921fbff6866319e5843.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_runtime.h>
#include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void InverseAndMemset(const T* s, T* o, bool* found_inf) {
*o = Inverse<T>(*s);
*found_inf = false;
}
template <typename T, typename MT>
__global__ void CheckFiniteAndUnscale(const T* in, const MT* scale, int num,
bool* found_inf, T* out) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < num) {
MT val = static_cast<MT>(in[idx]) * (*scale);
T narrow_val = static_cast<T>(val);
out[idx] = narrow_val;
if (!isfinite(narrow_val)) {
*found_inf = true;
}
}
}
template <typename T>
class CheckFiniteAndUnscaleGpuKernel : public framework::OpKernel<T> {
using MPDType = typename details::MPTypeTrait<T>::Type;
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const auto xs = ctx.MultiInput<framework::Tensor>("X");
const auto* scale = ctx.Input<framework::Tensor>("Scale");
auto outs = ctx.MultiOutput<framework::Tensor>("Out");
auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite");
const MPDType* scale_data = scale->data<MPDType>();
bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace());
framework::Tensor inverse_scale =
ctx.AllocateTmpTensor<MPDType, platform::CUDADeviceContext>({1},
dev_ctx);
MPDType* inverse_scale_v = inverse_scale.template data<MPDType>();
hipLaunchKernelGGL(( InverseAndMemset<MPDType>), dim3(1), dim3(1), 0, dev_ctx.stream(),
scale_data, inverse_scale_v, found_inf_data);
for (size_t i = 0; i < xs.size(); ++i) {
const auto* x = xs[i];
auto* out = outs[i];
const T* x_data = x->data<T>();
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
int num = x->numel();
int block = 1024;
int grid = (num + block - 1) / block;
VLOG(3) << "launch kernel";
hipLaunchKernelGGL(( CheckFiniteAndUnscale<T, MPDType>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
x_data, inverse_scale_v, num, found_inf_data, out_data);
VLOG(3) << "finish kernel";
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(check_finite_and_unscale,
ops::CheckFiniteAndUnscaleGpuKernel<float>,
ops::CheckFiniteAndUnscaleGpuKernel<double>,
ops::CheckFiniteAndUnscaleGpuKernel<plat::float16>);
| 0ff4704c4260c0d2b926a921fbff6866319e5843.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda.h>
#include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void InverseAndMemset(const T* s, T* o, bool* found_inf) {
*o = Inverse<T>(*s);
*found_inf = false;
}
template <typename T, typename MT>
__global__ void CheckFiniteAndUnscale(const T* in, const MT* scale, int num,
bool* found_inf, T* out) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < num) {
MT val = static_cast<MT>(in[idx]) * (*scale);
T narrow_val = static_cast<T>(val);
out[idx] = narrow_val;
if (!isfinite(narrow_val)) {
*found_inf = true;
}
}
}
template <typename T>
class CheckFiniteAndUnscaleGpuKernel : public framework::OpKernel<T> {
using MPDType = typename details::MPTypeTrait<T>::Type;
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const auto xs = ctx.MultiInput<framework::Tensor>("X");
const auto* scale = ctx.Input<framework::Tensor>("Scale");
auto outs = ctx.MultiOutput<framework::Tensor>("Out");
auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite");
const MPDType* scale_data = scale->data<MPDType>();
bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace());
framework::Tensor inverse_scale =
ctx.AllocateTmpTensor<MPDType, platform::CUDADeviceContext>({1},
dev_ctx);
MPDType* inverse_scale_v = inverse_scale.template data<MPDType>();
InverseAndMemset<MPDType><<<1, 1, 0, dev_ctx.stream()>>>(
scale_data, inverse_scale_v, found_inf_data);
for (size_t i = 0; i < xs.size(); ++i) {
const auto* x = xs[i];
auto* out = outs[i];
const T* x_data = x->data<T>();
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
int num = x->numel();
int block = 1024;
int grid = (num + block - 1) / block;
VLOG(3) << "launch kernel";
CheckFiniteAndUnscale<T, MPDType><<<grid, block, 0, dev_ctx.stream()>>>(
x_data, inverse_scale_v, num, found_inf_data, out_data);
VLOG(3) << "finish kernel";
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(check_finite_and_unscale,
ops::CheckFiniteAndUnscaleGpuKernel<float>,
ops::CheckFiniteAndUnscaleGpuKernel<double>,
ops::CheckFiniteAndUnscaleGpuKernel<plat::float16>);
|
738320972c9cda759e7d99fb049922a549e79653.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <system/Environment.h>
#include <loops/transform_float.h>
#include <types/types.h>
#include <system/op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
__global__ void transformFloatSimple(void *x, Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>(
x, xShapeInfo,
params,
z, zShapeInfo,
allocationPointer, reductionPointer,
tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X, typename Y>
_CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
template <typename OpType>
__device__ void TransformFloat<X,Z>::transformCuda(
void *vx,
Nd4jLong *xShapeInfo,
void *vparams,
void *vz,
Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<Z*>(vparams);
auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer);
if(OpType::requiresSpecial) {
OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
return;
}
else {
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') {
for (Nd4jLong i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
}
};
template<typename X, typename Y>
__device__ void TransformFloat<X,Y>::transformCudaLegacy(
int opNum,
void *x,
Nd4jLong *xShapeInfo,
void *params,
void *z,
Nd4jLong *zShapeInfo,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
}
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void TransformFloat<X,Z>::intermediateShaped(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
hipLaunchKernelGGL(( transformFloatSimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES);
}
}
| 738320972c9cda759e7d99fb049922a549e79653.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <system/Environment.h>
#include <loops/transform_float.h>
#include <types/types.h>
#include <system/op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
__global__ void transformFloatSimple(void *x, Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>(
x, xShapeInfo,
params,
z, zShapeInfo,
allocationPointer, reductionPointer,
tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X, typename Y>
_CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
template <typename OpType>
__device__ void TransformFloat<X,Z>::transformCuda(
void *vx,
Nd4jLong *xShapeInfo,
void *vparams,
void *vz,
Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<Z*>(vparams);
auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer);
if(OpType::requiresSpecial) {
OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
return;
}
else {
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') {
for (Nd4jLong i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
}
};
template<typename X, typename Y>
__device__ void TransformFloat<X,Y>::transformCudaLegacy(
int opNum,
void *x,
Nd4jLong *xShapeInfo,
void *params,
void *z,
Nd4jLong *zShapeInfo,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
}
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void TransformFloat<X,Z>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
transformFloatSimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES);
}
}
|
07c41137c95b3ebdc12738f80278d7696d384c10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void msecost(float* predictions, float* target, int size, float* cost) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
float partial_cost = (predictions[index] - target[index]) * (predictions[index] - target[index]);
atomicAdd(cost, partial_cost / size);
}
} | 07c41137c95b3ebdc12738f80278d7696d384c10.cu | #include "includes.h"
__global__ void msecost(float* predictions, float* target, int size, float* cost) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
float partial_cost = (predictions[index] - target[index]) * (predictions[index] - target[index]);
atomicAdd(cost, partial_cost / size);
}
} |
9a45f5801a1552bc1100e5488e1fbb933f89cc65.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <cstdio>
using namespace std;
void sumMatrixHost(float* A, float* B, float* C, const int nx, const int ny) {
float *ia = A;
float *ib = B;
float *ic = C;
for(int iy = 0;iy<ny; iy++) {
for(int ix = 0;ix<nx;ix++) {
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
__global__ void sumMatrixGpu(float* A, float* B, float* C, int NX, int NY) {
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int idx = iy * NX + ix;
if(ix < NX && iy < NY) {
C[idx] = A[idx] + B[idx];
}
}
int main(int argc, char** argv) {
int dev = 0;
getCudaInfo();
//NOTE. according to experiments
// 1 nx=1<<14, ny = 1<<14 will cause error and compute result
// mismatch and CudaGetLastError return 77
// 3 nx=1<<13, ny = 1<<14 will work
// execution result under this configuration:
// Using device : GeForce GTX 1050 Ti with Max-Q Design
// host sum cost: 0.321487
// sum matrix on gpu grid x: 256 grid y : 512 block x: 32 block y: 32 cost ts: 0.0161889
// res of get last error: 0
// scale smaller than 2 will work also
int nx = 1<<14;
int ny = 1<<14;
int nxy = nx*ny;
int nb = nxy * sizeof(float);
float * ha, *hb, *host, *gpu;
ha = (float*)malloc(nb);
hb = (float*)malloc(nb);
host = (float*) malloc(nb);
gpu = (float*) malloc(nb);
double start = seconds();
initD(ha, nxy);
initD(hb, nxy);
double elaps = seconds() - start;
memset(host, 0, nb);
memset(gpu, 0, nb);
start = seconds();
sumMatrixHost(ha, hb, host,nx, ny);
elaps = seconds() - start;
std::cout << " host sum cost: " << elaps << std::endl;
float * d_matA, * d_matB, * d_matC;
hipMalloc((void**)&d_matA, nb) ;
hipMalloc((void**)&d_matB, nb) ;
hipMalloc((void**)&d_matC, nb) ;
auto m = hipGetLastError();
// if m == 2 then malloc failed.
cout << "res of get last error after cuda Malloc: " << m << std::endl;
//start = seconds();
hipMemcpy(d_matA, ha, nb, hipMemcpyHostToDevice);
hipMemcpy(d_matB, hb, nb, hipMemcpyHostToDevice);
int dimx = 32;
int dimy = 32;
if(argc > 2) {
dimx = atoi(argv[1]);
dimy = atoi(argv[2]);
}
dim3 block(dimx, dimy);
dim3 grid((nx + block.x -1)/block.x, (ny+block.y-1)/block.y);
hipDeviceSynchronize();
start = seconds();
hipLaunchKernelGGL(( sumMatrixGpu), dim3(grid), dim3(block), 0, 0, d_matA, d_matB, d_matC, nx, ny);
hipDeviceSynchronize();
elaps = seconds() - start;
cout << " sum matrix on gpu grid x: " << grid.x << " grid y : " << grid.y
<< " block x: " << block.x << " block y: " << block.y << " cost ts: "
<< elaps << std::endl;
auto x = hipGetLastError();
cout << "res of get last error: " <<x << std::endl;
hipMemcpy(gpu, d_matC, nb, hipMemcpyDeviceToHost);
checkRes(host, gpu, nxy);
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
free(ha);
free(hb);
free(host);
free(gpu);
hipDeviceReset();
} | 9a45f5801a1552bc1100e5488e1fbb933f89cc65.cu | #include "common.h"
#include <cuda_runtime.h>
#include <cstdio>
using namespace std;
void sumMatrixHost(float* A, float* B, float* C, const int nx, const int ny) {
float *ia = A;
float *ib = B;
float *ic = C;
for(int iy = 0;iy<ny; iy++) {
for(int ix = 0;ix<nx;ix++) {
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
__global__ void sumMatrixGpu(float* A, float* B, float* C, int NX, int NY) {
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int idx = iy * NX + ix;
if(ix < NX && iy < NY) {
C[idx] = A[idx] + B[idx];
}
}
int main(int argc, char** argv) {
int dev = 0;
getCudaInfo();
//NOTE. according to experiments
// 1 nx=1<<14, ny = 1<<14 will cause error and compute result
// mismatch and CudaGetLastError return 77
// 3 nx=1<<13, ny = 1<<14 will work
// execution result under this configuration:
// Using device : GeForce GTX 1050 Ti with Max-Q Design
// host sum cost: 0.321487
// sum matrix on gpu grid x: 256 grid y : 512 block x: 32 block y: 32 cost ts: 0.0161889
// res of get last error: 0
// scale smaller than 2 will work also
int nx = 1<<14;
int ny = 1<<14;
int nxy = nx*ny;
int nb = nxy * sizeof(float);
float * ha, *hb, *host, *gpu;
ha = (float*)malloc(nb);
hb = (float*)malloc(nb);
host = (float*) malloc(nb);
gpu = (float*) malloc(nb);
double start = seconds();
initD(ha, nxy);
initD(hb, nxy);
double elaps = seconds() - start;
memset(host, 0, nb);
memset(gpu, 0, nb);
start = seconds();
sumMatrixHost(ha, hb, host,nx, ny);
elaps = seconds() - start;
std::cout << " host sum cost: " << elaps << std::endl;
float * d_matA, * d_matB, * d_matC;
cudaMalloc((void**)&d_matA, nb) ;
cudaMalloc((void**)&d_matB, nb) ;
cudaMalloc((void**)&d_matC, nb) ;
auto m = cudaGetLastError();
// if m == 2 then malloc failed.
cout << "res of get last error after cuda Malloc: " << m << std::endl;
//start = seconds();
cudaMemcpy(d_matA, ha, nb, cudaMemcpyHostToDevice);
cudaMemcpy(d_matB, hb, nb, cudaMemcpyHostToDevice);
int dimx = 32;
int dimy = 32;
if(argc > 2) {
dimx = atoi(argv[1]);
dimy = atoi(argv[2]);
}
dim3 block(dimx, dimy);
dim3 grid((nx + block.x -1)/block.x, (ny+block.y-1)/block.y);
cudaDeviceSynchronize();
start = seconds();
sumMatrixGpu<<<grid, block>>>(d_matA, d_matB, d_matC, nx, ny);
cudaDeviceSynchronize();
elaps = seconds() - start;
cout << " sum matrix on gpu grid x: " << grid.x << " grid y : " << grid.y
<< " block x: " << block.x << " block y: " << block.y << " cost ts: "
<< elaps << std::endl;
auto x = cudaGetLastError();
cout << "res of get last error: " <<x << std::endl;
cudaMemcpy(gpu, d_matC, nb, cudaMemcpyDeviceToHost);
checkRes(host, gpu, nxy);
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
free(ha);
free(hb);
free(host);
free(gpu);
cudaDeviceReset();
} |
236ec7bd7bea088207d45a9be3d9292b81bdba49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(size, this->_alignment);
this->currentSize += size;
this->_maxSize = ::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
return ((this->_alignment & (this->_alignment-1)) == 0) && isCounting() || this->allocBegin != NULL;
}
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(size, this->_alignment);
this->_maxSize = ::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(seg.size, this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
if (blockId > numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
hipStream_t cuStream)
{
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( drawRects<T>), dim3(grid), dim3(block), 0, 0, d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
| 236ec7bd7bea088207d45a9be3d9292b81bdba49.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(size, this->_alignment);
this->currentSize += size;
this->_maxSize = std::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
    return (((this->_alignment & (this->_alignment-1)) == 0) && isCounting()) || this->allocBegin != NULL;
}
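// A counting allocator (memType == NCVMemoryTypeNone) performs no real allocation; it only
// records the peak requested size so that a properly sized stack allocator can be created later.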
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(size, this->_alignment);
this->_maxSize = std::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(seg.size, this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    // keep the microsecond part so that sub-second intervals are not truncated to whole seconds
    return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec) / 1000.0;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
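    // Clusters overlapping detections with OpenCV's groupRectangles and rewrites the hypotheses
    // (and optional weights) arrays in place; numHypotheses is updated to the number of clusters kept.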
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
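    // Four blocks per rectangle: blockId >> 2 selects the rectangle, bit 0 selects a vertical
    // (left/right) or horizontal (top/bottom) edge, bit 1 selects which of the two edges;
    // the 32 threads of a block then walk along that edge in chunks.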
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    // the 2-D grid may be rounded up, so reject block indices past the last rectangle edge
    if (blockId >= numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
cudaStream_t cuStream)
{
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
    // launch on the caller-supplied stream rather than always using the default stream
    drawRects<T><<<grid, block, 0, cuStream>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
|
a24ebd338a5076c4313cb8115825ade3d0addbb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<sys/time.h>
#define MAXROW 1024
#define MAXCOL 1024
double when()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}
void initialize(float *oA, float *nA)
{
int i,j;
for(i=0; i<MAXROW; i++)
{
for(j=0;j<MAXCOL;j++)
{
if(i==0 || j==0 || j==(MAXCOL-1))
{
nA[i*MAXCOL + j] = 0.0;
oA[i*MAXCOL + j] = 0.0;
}
else if (i==MAXROW-1)
{
nA[i*MAXCOL + j] = 100.0;
oA[i*MAXCOL + j] = 100.0;
}
else if (i==400 && j<=330)
{
nA[i*MAXCOL + j] = 100.0;
oA[i*MAXCOL + j] = 100.0;
}
else if (i==200 && j ==500)
{
nA[i*MAXCOL + j] = 100.0;
oA[i*MAXCOL + j] = 100.0;
}
else
{
nA[i*MAXCOL + j] = 50.0;
oA[i*MAXCOL + j] = 50.0;
}
}
}
}
__global__ void calculate_new_values(float *nA, float *oA)
{
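    // One block per row, one thread per column: interior cells get a damped Jacobi update
    // (sum of the four neighbours plus a 4x-weighted centre, divided by 8); boundary
    // rows/columns and the two fixed heat sources are left unchanged.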
if(blockIdx.x == 0 || threadIdx.x ==0 ||blockIdx.x == MAXCOL-1 || threadIdx.x == MAXCOL-1
|| (blockIdx.x==400 && threadIdx.x<=330) || (blockIdx.x==200 && threadIdx.x==500) )
{}
else
{
nA[blockIdx.x * MAXCOL + threadIdx.x]
=(oA[(blockIdx.x+1) * MAXCOL + threadIdx.x] +
oA[(blockIdx.x-1) * MAXCOL + threadIdx.x] +
oA[blockIdx.x * MAXCOL + threadIdx.x+1] +
oA[blockIdx.x * MAXCOL + threadIdx.x-1] +
(4 * oA[blockIdx.x * MAXCOL + threadIdx.x]))/8.0;
}
}
int main(void)
{
double start_time = when();
float *nA, *oA;
float *d_nA, *d_oA;
float *tmp;
int iter=0;
float convergence;
int converged = 0;
int size = MAXROW * MAXCOL * sizeof(float);
nA = (float*)malloc(size);
oA = (float*)malloc(size);
hipError_t err = hipMalloc((void**)&d_nA,size);
hipError_t err1 = hipMalloc((void**)&d_oA,size);
initialize(oA, nA);
hipMemcpy(d_oA, oA, size, hipMemcpyHostToDevice);
hipMemcpy(d_nA, nA, size, hipMemcpyHostToDevice);
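    // Each iteration: run the stencil on the device, copy the new grid back, test convergence
    // on the host against a plain four-point average, then swap the two device buffers.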
while(!converged)
{
hipLaunchKernelGGL(( calculate_new_values), dim3(1024),dim3(1024), 0, 0, d_nA , d_oA);
hipMemcpy(nA , d_nA, size, hipMemcpyDeviceToHost);
converged = 1;
for(int i=1;i<MAXROW-1;i++)
{
for(int j=1;j<MAXCOL-1;j++)
{
if( (i==400 && j<=330) || (i==200 && j ==500))
{
//skip
}
else
{
convergence = nA[i*MAXCOL + j]- ((nA[(i+1)*MAXCOL + j] + nA[(i-1)*MAXCOL + j]
+ nA[i*MAXCOL + j+1] + nA[i*MAXCOL + j-1])/4.0 );
if(fabs(convergence) > 0.1)
{
converged = 0;
break;
}
}
}
if(converged == 0)
break;
}
iter++;
tmp = d_nA;
d_nA = d_oA;
d_oA = tmp;
}
printf("iter = %d and execution time = %f\n",iter, when() - start_time);
    // after the final pointer swap the most recent grid lives in d_oA, so copy that buffer back
    hipMemcpy(nA, d_oA, size, hipMemcpyDeviceToHost);
hipFree(d_nA);
hipFree(d_oA);
free(nA);
free(oA);
return 1;
}
| a24ebd338a5076c4313cb8115825ade3d0addbb7.cu | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<sys/time.h>
#define MAXROW 1024
#define MAXCOL 1024
double when()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}
void initialize(float *oA, float *nA)
{
int i,j;
for(i=0; i<MAXROW; i++)
{
for(j=0;j<MAXCOL;j++)
{
if(i==0 || j==0 || j==(MAXCOL-1))
{
nA[i*MAXCOL + j] = 0.0;
oA[i*MAXCOL + j] = 0.0;
}
else if (i==MAXROW-1)
{
nA[i*MAXCOL + j] = 100.0;
oA[i*MAXCOL + j] = 100.0;
}
else if (i==400 && j<=330)
{
nA[i*MAXCOL + j] = 100.0;
oA[i*MAXCOL + j] = 100.0;
}
else if (i==200 && j ==500)
{
nA[i*MAXCOL + j] = 100.0;
oA[i*MAXCOL + j] = 100.0;
}
else
{
nA[i*MAXCOL + j] = 50.0;
oA[i*MAXCOL + j] = 50.0;
}
}
}
}
__global__ void calculate_new_values(float *nA, float *oA)
{
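    // One block per row, one thread per column: interior cells get a damped Jacobi update
    // (sum of the four neighbours plus a 4x-weighted centre, divided by 8); boundary
    // rows/columns and the two fixed heat sources are left unchanged.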
if(blockIdx.x == 0 || threadIdx.x ==0 ||blockIdx.x == MAXCOL-1 || threadIdx.x == MAXCOL-1
|| (blockIdx.x==400 && threadIdx.x<=330) || (blockIdx.x==200 && threadIdx.x==500) )
{}
else
{
nA[blockIdx.x * MAXCOL + threadIdx.x]
=(oA[(blockIdx.x+1) * MAXCOL + threadIdx.x] +
oA[(blockIdx.x-1) * MAXCOL + threadIdx.x] +
oA[blockIdx.x * MAXCOL + threadIdx.x+1] +
oA[blockIdx.x * MAXCOL + threadIdx.x-1] +
(4 * oA[blockIdx.x * MAXCOL + threadIdx.x]))/8.0;
}
}
int main(void)
{
double start_time = when();
float *nA, *oA;
float *d_nA, *d_oA;
float *tmp;
int iter=0;
float convergence;
int converged = 0;
int size = MAXROW * MAXCOL * sizeof(float);
nA = (float*)malloc(size);
oA = (float*)malloc(size);
cudaError_t err = cudaMalloc((void**)&d_nA,size);
cudaError_t err1 = cudaMalloc((void**)&d_oA,size);
initialize(oA, nA);
cudaMemcpy(d_oA, oA, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_nA, nA, size, cudaMemcpyHostToDevice);
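    // Each iteration: run the stencil on the device, copy the new grid back, test convergence
    // on the host against a plain four-point average, then swap the two device buffers.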
while(!converged)
{
calculate_new_values<<<1024,1024>>>(d_nA , d_oA);
cudaMemcpy(nA , d_nA, size, cudaMemcpyDeviceToHost);
converged = 1;
for(int i=1;i<MAXROW-1;i++)
{
for(int j=1;j<MAXCOL-1;j++)
{
if( (i==400 && j<=330) || (i==200 && j ==500))
{
//skip
}
else
{
convergence = nA[i*MAXCOL + j]- ((nA[(i+1)*MAXCOL + j] + nA[(i-1)*MAXCOL + j]
+ nA[i*MAXCOL + j+1] + nA[i*MAXCOL + j-1])/4.0 );
if(fabs(convergence) > 0.1)
{
converged = 0;
break;
}
}
}
if(converged == 0)
break;
}
iter++;
tmp = d_nA;
d_nA = d_oA;
d_oA = tmp;
}
printf("iter = %d and execution time = %f\n",iter, when() - start_time);
    // after the final pointer swap the most recent grid lives in d_oA, so copy that buffer back
    cudaMemcpy(nA, d_oA, size, cudaMemcpyDeviceToHost);
cudaFree(d_nA);
cudaFree(d_oA);
free(nA);
free(oA);
return 1;
}
|
1906aae80277e5441d59a02fc3e3e548240b80f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, double * __restrict__ __var_4__){
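    // 5x5 Gaussian smoothing stencil: weights {2,4,5,4,2; 4,9,12,9,4; 5,12,15,12,5; ...} normalised
    // by 159 (their sum); __kernel___forma_kernel__1/2/3 below apply the same stencil to the
    // output of the previous pass.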
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (M-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (N-3)){
double __temp_0__;
__temp_0__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-2))]);
double __temp_1__;
__temp_1__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-2))]);
double __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
double __temp_3__;
__temp_3__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-2))]);
double __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
double __temp_5__;
__temp_5__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-2))]);
double __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
double __temp_7__;
__temp_7__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-2))]);
double __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
double __temp_9__;
__temp_9__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-1))]);
double __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
double __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]);
double __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
double __temp_13__;
__temp_13__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
double __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
double __temp_15__;
__temp_15__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]);
double __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
double __temp_17__;
__temp_17__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-1))]);
double __temp_18__;
__temp_18__ = (__temp_16__ + __temp_17__);
double __temp_19__;
__temp_19__ = (5 * input[__iter_0__+(-2)+(M-0)*(__iter_1__)]);
double __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
double __temp_21__;
__temp_21__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
double __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
double __temp_23__;
__temp_23__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
double __temp_24__;
__temp_24__ = (__temp_22__ + __temp_23__);
double __temp_25__;
__temp_25__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
double __temp_26__;
__temp_26__ = (__temp_24__ + __temp_25__);
double __temp_27__;
__temp_27__ = (5 * input[__iter_0__+(2)+(M-0)*(__iter_1__)]);
double __temp_28__;
__temp_28__ = (__temp_26__ + __temp_27__);
double __temp_29__;
__temp_29__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(1))]);
double __temp_30__;
__temp_30__ = (__temp_28__ + __temp_29__);
double __temp_31__;
__temp_31__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]);
double __temp_32__;
__temp_32__ = (__temp_30__ + __temp_31__);
double __temp_33__;
__temp_33__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
double __temp_34__;
__temp_34__ = (__temp_32__ + __temp_33__);
double __temp_35__;
__temp_35__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]);
double __temp_36__;
__temp_36__ = (__temp_34__ + __temp_35__);
double __temp_37__;
__temp_37__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(1))]);
double __temp_38__;
__temp_38__ = (__temp_36__ + __temp_37__);
double __temp_39__;
__temp_39__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(2))]);
double __temp_40__;
__temp_40__ = (__temp_38__ + __temp_39__);
double __temp_41__;
__temp_41__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(2))]);
double __temp_42__;
__temp_42__ = (__temp_40__ + __temp_41__);
double __temp_43__;
__temp_43__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(2))]);
double __temp_44__;
__temp_44__ = (__temp_42__ + __temp_43__);
double __temp_45__;
__temp_45__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(2))]);
double __temp_46__;
__temp_46__ = (__temp_44__ + __temp_45__);
double __temp_47__;
__temp_47__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(2))]);
double __temp_48__;
__temp_48__ = (__temp_46__ + __temp_47__);
double __temp_49__;
__temp_49__ = (__temp_48__ / 159);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_49__;
}
}
}
__global__ void __kernel___forma_kernel__1__(double * __restrict__ __var_4__, int N, int M, double * __restrict__ __var_3__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_2__;
__iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_2__ <= (M-3)){
int __iter_3__;
__iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_3__ <= (N-3)){
double __temp_50__;
__temp_50__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-2))]);
double __temp_51__;
__temp_51__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-2))]);
double __temp_52__;
__temp_52__ = (__temp_50__ + __temp_51__);
double __temp_53__;
__temp_53__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-2))]);
double __temp_54__;
__temp_54__ = (__temp_52__ + __temp_53__);
double __temp_55__;
__temp_55__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-2))]);
double __temp_56__;
__temp_56__ = (__temp_54__ + __temp_55__);
double __temp_57__;
__temp_57__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-2))]);
double __temp_58__;
__temp_58__ = (__temp_56__ + __temp_57__);
double __temp_59__;
__temp_59__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-1))]);
double __temp_60__;
__temp_60__ = (__temp_58__ + __temp_59__);
double __temp_61__;
__temp_61__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]);
double __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
double __temp_63__;
__temp_63__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]);
double __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
double __temp_65__;
__temp_65__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]);
double __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
double __temp_67__;
__temp_67__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-1))]);
double __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
double __temp_69__;
__temp_69__ = (5 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__)]);
double __temp_70__;
__temp_70__ = (__temp_68__ + __temp_69__);
double __temp_71__;
__temp_71__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]);
double __temp_72__;
__temp_72__ = (__temp_70__ + __temp_71__);
double __temp_73__;
__temp_73__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]);
double __temp_74__;
__temp_74__ = (__temp_72__ + __temp_73__);
double __temp_75__;
__temp_75__ = (12 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]);
double __temp_76__;
__temp_76__ = (__temp_74__ + __temp_75__);
double __temp_77__;
__temp_77__ = (5 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__)]);
double __temp_78__;
__temp_78__ = (__temp_76__ + __temp_77__);
double __temp_79__;
__temp_79__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(1))]);
double __temp_80__;
__temp_80__ = (__temp_78__ + __temp_79__);
double __temp_81__;
__temp_81__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]);
double __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
double __temp_83__;
__temp_83__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]);
double __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
double __temp_85__;
__temp_85__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]);
double __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
double __temp_87__;
__temp_87__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(1))]);
double __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
double __temp_89__;
__temp_89__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(2))]);
double __temp_90__;
__temp_90__ = (__temp_88__ + __temp_89__);
double __temp_91__;
__temp_91__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(2))]);
double __temp_92__;
__temp_92__ = (__temp_90__ + __temp_91__);
double __temp_93__;
__temp_93__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(2))]);
double __temp_94__;
__temp_94__ = (__temp_92__ + __temp_93__);
double __temp_95__;
__temp_95__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(2))]);
double __temp_96__;
__temp_96__ = (__temp_94__ + __temp_95__);
double __temp_97__;
__temp_97__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(2))]);
double __temp_98__;
__temp_98__ = (__temp_96__ + __temp_97__);
double __temp_99__;
__temp_99__ = (__temp_98__ / 159);
__var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_99__;
}
}
}
__global__ void __kernel___forma_kernel__2__(double * __restrict__ __var_3__, int N, int M, double * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_4__;
__iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_4__ <= (M-3)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_5__ <= (N-3)){
double __temp_100__;
__temp_100__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-2))]);
double __temp_101__;
__temp_101__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-2))]);
double __temp_102__;
__temp_102__ = (__temp_100__ + __temp_101__);
double __temp_103__;
__temp_103__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-2))]);
double __temp_104__;
__temp_104__ = (__temp_102__ + __temp_103__);
double __temp_105__;
__temp_105__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-2))]);
double __temp_106__;
__temp_106__ = (__temp_104__ + __temp_105__);
double __temp_107__;
__temp_107__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-2))]);
double __temp_108__;
__temp_108__ = (__temp_106__ + __temp_107__);
double __temp_109__;
__temp_109__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-1))]);
double __temp_110__;
__temp_110__ = (__temp_108__ + __temp_109__);
double __temp_111__;
__temp_111__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]);
double __temp_112__;
__temp_112__ = (__temp_110__ + __temp_111__);
double __temp_113__;
__temp_113__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]);
double __temp_114__;
__temp_114__ = (__temp_112__ + __temp_113__);
double __temp_115__;
__temp_115__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]);
double __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
double __temp_117__;
__temp_117__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-1))]);
double __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
double __temp_119__;
__temp_119__ = (5 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__)]);
double __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
double __temp_121__;
__temp_121__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]);
double __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
double __temp_123__;
__temp_123__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]);
double __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
double __temp_125__;
__temp_125__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]);
double __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
double __temp_127__;
__temp_127__ = (5 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__)]);
double __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
double __temp_129__;
__temp_129__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(1))]);
double __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
double __temp_131__;
__temp_131__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]);
double __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
double __temp_133__;
__temp_133__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]);
double __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
double __temp_135__;
__temp_135__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]);
double __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
double __temp_137__;
__temp_137__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(1))]);
double __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
double __temp_139__;
__temp_139__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(2))]);
double __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
double __temp_141__;
__temp_141__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(2))]);
double __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
double __temp_143__;
__temp_143__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(2))]);
double __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
double __temp_145__;
__temp_145__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(2))]);
double __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
double __temp_147__;
__temp_147__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(2))]);
double __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
double __temp_149__;
__temp_149__ = (__temp_148__ / 159);
__var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_149__;
}
}
}
__global__ void __kernel___forma_kernel__3__(double * __restrict__ __var_2__, int N, int M, double * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_6__ <= (M-3)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_7__ <= (N-3)){
double __temp_150__;
__temp_150__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-2))]);
double __temp_151__;
__temp_151__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-2))]);
double __temp_152__;
__temp_152__ = (__temp_150__ + __temp_151__);
double __temp_153__;
__temp_153__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-2))]);
double __temp_154__;
__temp_154__ = (__temp_152__ + __temp_153__);
double __temp_155__;
__temp_155__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-2))]);
double __temp_156__;
__temp_156__ = (__temp_154__ + __temp_155__);
double __temp_157__;
__temp_157__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-2))]);
double __temp_158__;
__temp_158__ = (__temp_156__ + __temp_157__);
double __temp_159__;
__temp_159__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-1))]);
double __temp_160__;
__temp_160__ = (__temp_158__ + __temp_159__);
double __temp_161__;
__temp_161__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]);
double __temp_162__;
__temp_162__ = (__temp_160__ + __temp_161__);
double __temp_163__;
__temp_163__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]);
double __temp_164__;
__temp_164__ = (__temp_162__ + __temp_163__);
double __temp_165__;
__temp_165__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]);
double __temp_166__;
__temp_166__ = (__temp_164__ + __temp_165__);
double __temp_167__;
__temp_167__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-1))]);
double __temp_168__;
__temp_168__ = (__temp_166__ + __temp_167__);
double __temp_169__;
__temp_169__ = (5 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__)]);
double __temp_170__;
__temp_170__ = (__temp_168__ + __temp_169__);
double __temp_171__;
__temp_171__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]);
double __temp_172__;
__temp_172__ = (__temp_170__ + __temp_171__);
double __temp_173__;
__temp_173__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]);
double __temp_174__;
__temp_174__ = (__temp_172__ + __temp_173__);
double __temp_175__;
__temp_175__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]);
double __temp_176__;
__temp_176__ = (__temp_174__ + __temp_175__);
double __temp_177__;
__temp_177__ = (5 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__)]);
double __temp_178__;
__temp_178__ = (__temp_176__ + __temp_177__);
double __temp_179__;
__temp_179__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(1))]);
double __temp_180__;
__temp_180__ = (__temp_178__ + __temp_179__);
double __temp_181__;
__temp_181__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]);
double __temp_182__;
__temp_182__ = (__temp_180__ + __temp_181__);
double __temp_183__;
__temp_183__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]);
double __temp_184__;
__temp_184__ = (__temp_182__ + __temp_183__);
double __temp_185__;
__temp_185__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]);
double __temp_186__;
__temp_186__ = (__temp_184__ + __temp_185__);
double __temp_187__;
__temp_187__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(1))]);
double __temp_188__;
__temp_188__ = (__temp_186__ + __temp_187__);
double __temp_189__;
__temp_189__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(2))]);
double __temp_190__;
__temp_190__ = (__temp_188__ + __temp_189__);
double __temp_191__;
__temp_191__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(2))]);
double __temp_192__;
__temp_192__ = (__temp_190__ + __temp_191__);
double __temp_193__;
__temp_193__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(2))]);
double __temp_194__;
__temp_194__ = (__temp_192__ + __temp_193__);
double __temp_195__;
__temp_195__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(2))]);
double __temp_196__;
__temp_196__ = (__temp_194__ + __temp_195__);
double __temp_197__;
__temp_197__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(2))]);
double __temp_198__;
__temp_198__ = (__temp_196__ + __temp_197__);
double __temp_199__;
__temp_199__ = (__temp_198__ / 159);
__var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_199__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void gaussian(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((N-0)*(M-0)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
hipMalloc(&__var_2__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
hipMalloc(&__var_3__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
hipMalloc(&__var_4__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
    // hipify does not translate the NVML power-measurement calls; the block below uses the
    // ROCm SMI equivalents instead (rsmi_init / rsmi_dev_power_ave_get / rsmi_shut_down).
    // rsmi_dev_power_ave_get reports average power in microwatts, and there is no direct
    // counterpart of nvmlDeviceGetPowerManagementMode, so that query is dropped here.
    uint64_t power1, power2;
    rsmi_status_t result;
    uint32_t device = 0;
    result = rsmi_init(0);
    assert(RSMI_STATUS_SUCCESS == result);
    result = rsmi_dev_power_ave_get(device, 0, &power1);
    assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<1000; x++) {
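    // Chain the same 5x5 Gaussian four times per iteration (input -> __var_4__ -> __var_3__ ->
    // __var_2__ -> __var_1__), 1000 iterations in total, so that the power readings below
    // bracket a sizeable execution window.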
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __var_4__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_4__, N, M, __var_3__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_3__, N, M, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
hipDeviceSynchronize();
    result = rsmi_dev_power_ave_get(device, 0, &power2);
    assert(RSMI_STATUS_SUCCESS == result);
    power2 -= power1;
    printf("%lu\n", (unsigned long)power2);
    rsmi_shut_down();
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
hipFree(__var_3__);
hipFree(__var_4__);
}
/*Host Free End*/
| 1906aae80277e5441d59a02fc3e3e548240b80f1.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, double * __restrict__ __var_4__){
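    // 5x5 Gaussian smoothing stencil: weights {2,4,5,4,2; 4,9,12,9,4; 5,12,15,12,5; ...} normalised
    // by 159 (their sum); __kernel___forma_kernel__1/2/3 below apply the same stencil to the
    // output of the previous pass.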
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (M-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (N-3)){
double __temp_0__;
__temp_0__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-2))]);
double __temp_1__;
__temp_1__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-2))]);
double __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
double __temp_3__;
__temp_3__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-2))]);
double __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
double __temp_5__;
__temp_5__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-2))]);
double __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
double __temp_7__;
__temp_7__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-2))]);
double __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
double __temp_9__;
__temp_9__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-1))]);
double __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
double __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]);
double __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
double __temp_13__;
__temp_13__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
double __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
double __temp_15__;
__temp_15__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]);
double __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
double __temp_17__;
__temp_17__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-1))]);
double __temp_18__;
__temp_18__ = (__temp_16__ + __temp_17__);
double __temp_19__;
__temp_19__ = (5 * input[__iter_0__+(-2)+(M-0)*(__iter_1__)]);
double __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
double __temp_21__;
__temp_21__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
double __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
double __temp_23__;
__temp_23__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
double __temp_24__;
__temp_24__ = (__temp_22__ + __temp_23__);
double __temp_25__;
__temp_25__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
double __temp_26__;
__temp_26__ = (__temp_24__ + __temp_25__);
double __temp_27__;
__temp_27__ = (5 * input[__iter_0__+(2)+(M-0)*(__iter_1__)]);
double __temp_28__;
__temp_28__ = (__temp_26__ + __temp_27__);
double __temp_29__;
__temp_29__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(1))]);
double __temp_30__;
__temp_30__ = (__temp_28__ + __temp_29__);
double __temp_31__;
__temp_31__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]);
double __temp_32__;
__temp_32__ = (__temp_30__ + __temp_31__);
double __temp_33__;
__temp_33__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
double __temp_34__;
__temp_34__ = (__temp_32__ + __temp_33__);
double __temp_35__;
__temp_35__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]);
double __temp_36__;
__temp_36__ = (__temp_34__ + __temp_35__);
double __temp_37__;
__temp_37__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(1))]);
double __temp_38__;
__temp_38__ = (__temp_36__ + __temp_37__);
double __temp_39__;
__temp_39__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(2))]);
double __temp_40__;
__temp_40__ = (__temp_38__ + __temp_39__);
double __temp_41__;
__temp_41__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(2))]);
double __temp_42__;
__temp_42__ = (__temp_40__ + __temp_41__);
double __temp_43__;
__temp_43__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(2))]);
double __temp_44__;
__temp_44__ = (__temp_42__ + __temp_43__);
double __temp_45__;
__temp_45__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(2))]);
double __temp_46__;
__temp_46__ = (__temp_44__ + __temp_45__);
double __temp_47__;
__temp_47__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(2))]);
double __temp_48__;
__temp_48__ = (__temp_46__ + __temp_47__);
double __temp_49__;
__temp_49__ = (__temp_48__ / 159);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_49__;
}
}
}
__global__ void __kernel___forma_kernel__1__(double * __restrict__ __var_4__, int N, int M, double * __restrict__ __var_3__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_2__;
__iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_2__ <= (M-3)){
int __iter_3__;
__iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_3__ <= (N-3)){
double __temp_50__;
__temp_50__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-2))]);
double __temp_51__;
__temp_51__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-2))]);
double __temp_52__;
__temp_52__ = (__temp_50__ + __temp_51__);
double __temp_53__;
__temp_53__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-2))]);
double __temp_54__;
__temp_54__ = (__temp_52__ + __temp_53__);
double __temp_55__;
__temp_55__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-2))]);
double __temp_56__;
__temp_56__ = (__temp_54__ + __temp_55__);
double __temp_57__;
__temp_57__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-2))]);
double __temp_58__;
__temp_58__ = (__temp_56__ + __temp_57__);
double __temp_59__;
__temp_59__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-1))]);
double __temp_60__;
__temp_60__ = (__temp_58__ + __temp_59__);
double __temp_61__;
__temp_61__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]);
double __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
double __temp_63__;
__temp_63__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]);
double __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
double __temp_65__;
__temp_65__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]);
double __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
double __temp_67__;
__temp_67__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-1))]);
double __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
double __temp_69__;
__temp_69__ = (5 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__)]);
double __temp_70__;
__temp_70__ = (__temp_68__ + __temp_69__);
double __temp_71__;
__temp_71__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]);
double __temp_72__;
__temp_72__ = (__temp_70__ + __temp_71__);
double __temp_73__;
__temp_73__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]);
double __temp_74__;
__temp_74__ = (__temp_72__ + __temp_73__);
double __temp_75__;
__temp_75__ = (12 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]);
double __temp_76__;
__temp_76__ = (__temp_74__ + __temp_75__);
double __temp_77__;
__temp_77__ = (5 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__)]);
double __temp_78__;
__temp_78__ = (__temp_76__ + __temp_77__);
double __temp_79__;
__temp_79__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(1))]);
double __temp_80__;
__temp_80__ = (__temp_78__ + __temp_79__);
double __temp_81__;
__temp_81__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]);
double __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
double __temp_83__;
__temp_83__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]);
double __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
double __temp_85__;
__temp_85__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]);
double __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
double __temp_87__;
__temp_87__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(1))]);
double __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
double __temp_89__;
__temp_89__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(2))]);
double __temp_90__;
__temp_90__ = (__temp_88__ + __temp_89__);
double __temp_91__;
__temp_91__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(2))]);
double __temp_92__;
__temp_92__ = (__temp_90__ + __temp_91__);
double __temp_93__;
__temp_93__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(2))]);
double __temp_94__;
__temp_94__ = (__temp_92__ + __temp_93__);
double __temp_95__;
__temp_95__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(2))]);
double __temp_96__;
__temp_96__ = (__temp_94__ + __temp_95__);
double __temp_97__;
__temp_97__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(2))]);
double __temp_98__;
__temp_98__ = (__temp_96__ + __temp_97__);
double __temp_99__;
__temp_99__ = (__temp_98__ / 159);
__var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_99__;
}
}
}
__global__ void __kernel___forma_kernel__2__(double * __restrict__ __var_3__, int N, int M, double * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_4__;
__iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_4__ <= (M-3)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_5__ <= (N-3)){
double __temp_100__;
__temp_100__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-2))]);
double __temp_101__;
__temp_101__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-2))]);
double __temp_102__;
__temp_102__ = (__temp_100__ + __temp_101__);
double __temp_103__;
__temp_103__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-2))]);
double __temp_104__;
__temp_104__ = (__temp_102__ + __temp_103__);
double __temp_105__;
__temp_105__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-2))]);
double __temp_106__;
__temp_106__ = (__temp_104__ + __temp_105__);
double __temp_107__;
__temp_107__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-2))]);
double __temp_108__;
__temp_108__ = (__temp_106__ + __temp_107__);
double __temp_109__;
__temp_109__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-1))]);
double __temp_110__;
__temp_110__ = (__temp_108__ + __temp_109__);
double __temp_111__;
__temp_111__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]);
double __temp_112__;
__temp_112__ = (__temp_110__ + __temp_111__);
double __temp_113__;
__temp_113__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]);
double __temp_114__;
__temp_114__ = (__temp_112__ + __temp_113__);
double __temp_115__;
__temp_115__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]);
double __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
double __temp_117__;
__temp_117__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-1))]);
double __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
double __temp_119__;
__temp_119__ = (5 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__)]);
double __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
double __temp_121__;
__temp_121__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]);
double __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
double __temp_123__;
__temp_123__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]);
double __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
double __temp_125__;
__temp_125__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]);
double __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
double __temp_127__;
__temp_127__ = (5 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__)]);
double __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
double __temp_129__;
__temp_129__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(1))]);
double __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
double __temp_131__;
__temp_131__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]);
double __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
double __temp_133__;
__temp_133__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]);
double __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
double __temp_135__;
__temp_135__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]);
double __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
double __temp_137__;
__temp_137__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(1))]);
double __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
double __temp_139__;
__temp_139__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(2))]);
double __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
double __temp_141__;
__temp_141__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(2))]);
double __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
double __temp_143__;
__temp_143__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(2))]);
double __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
double __temp_145__;
__temp_145__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(2))]);
double __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
double __temp_147__;
__temp_147__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(2))]);
double __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
double __temp_149__;
__temp_149__ = (__temp_148__ / 159);
__var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_149__;
}
}
}
__global__ void __kernel___forma_kernel__3__(double * __restrict__ __var_2__, int N, int M, double * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_6__ <= (M-3)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_7__ <= (N-3)){
double __temp_150__;
__temp_150__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-2))]);
double __temp_151__;
__temp_151__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-2))]);
double __temp_152__;
__temp_152__ = (__temp_150__ + __temp_151__);
double __temp_153__;
__temp_153__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-2))]);
double __temp_154__;
__temp_154__ = (__temp_152__ + __temp_153__);
double __temp_155__;
__temp_155__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-2))]);
double __temp_156__;
__temp_156__ = (__temp_154__ + __temp_155__);
double __temp_157__;
__temp_157__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-2))]);
double __temp_158__;
__temp_158__ = (__temp_156__ + __temp_157__);
double __temp_159__;
__temp_159__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-1))]);
double __temp_160__;
__temp_160__ = (__temp_158__ + __temp_159__);
double __temp_161__;
__temp_161__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]);
double __temp_162__;
__temp_162__ = (__temp_160__ + __temp_161__);
double __temp_163__;
__temp_163__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]);
double __temp_164__;
__temp_164__ = (__temp_162__ + __temp_163__);
double __temp_165__;
__temp_165__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]);
double __temp_166__;
__temp_166__ = (__temp_164__ + __temp_165__);
double __temp_167__;
__temp_167__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-1))]);
double __temp_168__;
__temp_168__ = (__temp_166__ + __temp_167__);
double __temp_169__;
__temp_169__ = (5 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__)]);
double __temp_170__;
__temp_170__ = (__temp_168__ + __temp_169__);
double __temp_171__;
__temp_171__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]);
double __temp_172__;
__temp_172__ = (__temp_170__ + __temp_171__);
double __temp_173__;
__temp_173__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]);
double __temp_174__;
__temp_174__ = (__temp_172__ + __temp_173__);
double __temp_175__;
__temp_175__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]);
double __temp_176__;
__temp_176__ = (__temp_174__ + __temp_175__);
double __temp_177__;
__temp_177__ = (5 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__)]);
double __temp_178__;
__temp_178__ = (__temp_176__ + __temp_177__);
double __temp_179__;
__temp_179__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(1))]);
double __temp_180__;
__temp_180__ = (__temp_178__ + __temp_179__);
double __temp_181__;
__temp_181__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]);
double __temp_182__;
__temp_182__ = (__temp_180__ + __temp_181__);
double __temp_183__;
__temp_183__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]);
double __temp_184__;
__temp_184__ = (__temp_182__ + __temp_183__);
double __temp_185__;
__temp_185__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]);
double __temp_186__;
__temp_186__ = (__temp_184__ + __temp_185__);
double __temp_187__;
__temp_187__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(1))]);
double __temp_188__;
__temp_188__ = (__temp_186__ + __temp_187__);
double __temp_189__;
__temp_189__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(2))]);
double __temp_190__;
__temp_190__ = (__temp_188__ + __temp_189__);
double __temp_191__;
__temp_191__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(2))]);
double __temp_192__;
__temp_192__ = (__temp_190__ + __temp_191__);
double __temp_193__;
__temp_193__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(2))]);
double __temp_194__;
__temp_194__ = (__temp_192__ + __temp_193__);
double __temp_195__;
__temp_195__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(2))]);
double __temp_196__;
__temp_196__ = (__temp_194__ + __temp_195__);
double __temp_197__;
__temp_197__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(2))]);
double __temp_198__;
__temp_198__ = (__temp_196__ + __temp_197__);
double __temp_199__;
__temp_199__ = (__temp_198__ / 159);
__var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_199__;
}
}
}
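/* Note: like the preceding kernels, __kernel___forma_kernel__3__ applies a 5x5 Gaussian smoothing stencil
   (weights 2 4 5 4 2 / 4 9 12 9 4 / 5 12 15 12 5 / 4 9 12 9 4 / 2 4 5 4 2, normalized by 159) to the
   interior points, reading one buffer and writing the result to another. */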
/*Device code End */
/* Host Code Begin */
extern "C" void gaussian(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((N-0)*(M-0)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
cudaMalloc(&__var_2__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
cudaMalloc(&__var_3__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
cudaMalloc(&__var_4__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<1000; x++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __var_4__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_4__, N, M, __var_3__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_3__, N, M, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
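/* The loop above ping-pongs the same Gaussian kernel four times per iteration
   (input -> __var_4__ -> __var_3__ -> __var_2__ -> __var_1__), repeated 1000 times
   to provide a sustained load for the NVML power measurement. */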
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
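/* nvmlDeviceGetPowerUsage reports instantaneous board power in milliwatts, so the value
   printed above is the increase in power draw measured across the kernel loop. */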
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
cudaFree(__var_3__);
cudaFree(__var_4__);
}
/*Host Free End*/
|
088133b075a5010ff9369b2759b3184491666bd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_push2_stochastic(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int *d_counter, bool *d_finish, int *g_block_num)
{
if (d_stochastic[blockIdx.y * (*g_block_num) + blockIdx.x] == 1)
{
int x1 = threadIdx.x;
int y1 = threadIdx.y;
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
int thid = __umul24(y, width1) + x;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1 + 1, 34) + x1 + 1, temp_mult1 = __umul24(y1, 32) + x1;
height_fn[temp_mult] = g_graph_height[thid];
(threadIdx.x == 31 && x < width1 - 1) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
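/* The loads above stage a (blockDim.x + 2)-wide tile of the height function in shared memory,
   with boundary threads filling a one-element halo; the hard-coded 34 (and the checks on
   threadIdx.x == 31 / threadIdx.y == 7) assume a 32x8 thread block. */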
int flow_push = 0, min_flow_pushed = 0;
flow_push = g_push_reser[thid];
if (thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0)
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid];
min_flow_pushed = flow_push;
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
g_sink_weight[thid] = temp_weight;
atomicSub(&g_push_reser[thid], min_flow_pushed);
}
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_left_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_left_weight[thid], min_flow_pushed);
atomicAdd(&g_right_weight[thid - 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - 1], min_flow_pushed);
}
else atomicSub(&g_pull_left[thid - 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_up_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_up_weight[thid], min_flow_pushed);
atomicAdd(&g_down_weight[thid - width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - width1], min_flow_pushed);
}
else atomicSub(&g_pull_up[thid - width1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_right_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_right_weight[thid], min_flow_pushed);
atomicAdd(&g_left_weight[thid + 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + 1], min_flow_pushed);
}
else atomicSub(&g_pull_right[thid + 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_down_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_down_weight[thid], min_flow_pushed);
atomicAdd(&g_up_weight[thid + width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + width1], min_flow_pushed);
}
else atomicSub(&g_pull_down[thid + width1], 1);
}
__syncthreads();
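/* Re-evaluate the relabel mask: 2 marks a node with no excess flow or no remaining residual
   capacity, 1 marks a node that can still push along some admissible edge (height difference
   of exactly one, or height 1 towards the sink), 0 otherwise. */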
min_flow_pushed = g_left_weight[thid];
flow_push = g_push_reser[thid];
if (flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2;
else
{
(flow_push > 0 && (((height_fn[temp_mult] == height_fn[temp_mult - 1] + 1) && g_left_weight[thid] > 0) ||
                   ((height_fn[temp_mult] == height_fn[temp_mult + 1] + 1) && g_right_weight[thid] > 0) ||
                   ((height_fn[temp_mult] == height_fn[temp_mult + 34] + 1) && g_down_weight[thid] > 0) ||
                   ((height_fn[temp_mult] == height_fn[temp_mult - 34] + 1) && g_up_weight[thid] > 0) ||
                   (height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0))) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0;
}
__syncthreads();
if (thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0)
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid];
min_flow_pushed = flow_push;
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
g_sink_weight[thid] = temp_weight;
atomicSub(&g_push_reser[thid], min_flow_pushed);
}
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_left_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_left_weight[thid], min_flow_pushed);
atomicAdd(&g_right_weight[thid - 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - 1], min_flow_pushed);
}
else atomicSub(&g_pull_left[thid - 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_up_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_up_weight[thid], min_flow_pushed);
atomicAdd(&g_down_weight[thid - width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - width1], min_flow_pushed);
}
else atomicSub(&g_pull_up[thid - width1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_right_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_right_weight[thid], min_flow_pushed);
atomicAdd(&g_left_weight[thid + 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + 1], min_flow_pushed);
}
else atomicSub(&g_pull_right[thid + 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_down_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_down_weight[thid], min_flow_pushed);
atomicAdd(&g_up_weight[thid + width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + width1], min_flow_pushed);
}
else atomicSub(&g_pull_down[thid + width1], 1);
}
}
} | 088133b075a5010ff9369b2759b3184491666bd8.cu | #include "includes.h"
__global__ void kernel_push2_stochastic(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int *d_counter, bool *d_finish, int *g_block_num)
{
if (d_stochastic[blockIdx.y * (*g_block_num) + blockIdx.x] == 1)
{
int x1 = threadIdx.x;
int y1 = threadIdx.y;
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
int thid = __umul24(y, width1) + x;
__shared__ int height_fn[356];
int temp_mult = __umul24(y1 + 1, 34) + x1 + 1, temp_mult1 = __umul24(y1, 32) + x1;
height_fn[temp_mult] = g_graph_height[thid];
(threadIdx.x == 31 && x < width1 - 1) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0;
(threadIdx.x == 0 && x > 0) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0;
(threadIdx.y == 7 && y < rows1 - 1) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0;
(threadIdx.y == 0 && y > 0) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0;
__syncthreads();
int flow_push = 0, min_flow_pushed = 0;
flow_push = g_push_reser[thid];
if (thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0)
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid];
min_flow_pushed = flow_push;
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
g_sink_weight[thid] = temp_weight;
atomicSub(&g_push_reser[thid], min_flow_pushed);
}
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_left_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_left_weight[thid], min_flow_pushed);
atomicAdd(&g_right_weight[thid - 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - 1], min_flow_pushed);
}
else atomicSub(&g_pull_left[thid - 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_up_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_up_weight[thid], min_flow_pushed);
atomicAdd(&g_down_weight[thid - width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - width1], min_flow_pushed);
}
else atomicSub(&g_pull_up[thid - width1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_right_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_right_weight[thid], min_flow_pushed);
atomicAdd(&g_left_weight[thid + 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + 1], min_flow_pushed);
}
else atomicSub(&g_pull_right[thid + 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_down_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_down_weight[thid], min_flow_pushed);
atomicAdd(&g_up_weight[thid + width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + width1], min_flow_pushed);
}
else atomicSub(&g_pull_down[thid + width1], 1);
}
__syncthreads();
min_flow_pushed = g_left_weight[thid];
flow_push = g_push_reser[thid];
if (flow_push <= 0 || (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 && g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
g_relabel_mask[thid] = 2;
else
{
(flow_push > 0 && (((height_fn[temp_mult] == height_fn[temp_mult - 1] + 1) && g_left_weight[thid] > 0) ||
                   ((height_fn[temp_mult] == height_fn[temp_mult + 1] + 1) && g_right_weight[thid] > 0) ||
                   ((height_fn[temp_mult] == height_fn[temp_mult + 34] + 1) && g_down_weight[thid] > 0) ||
                   ((height_fn[temp_mult] == height_fn[temp_mult - 34] + 1) && g_up_weight[thid] > 0) ||
                   (height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0))) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0;
}
__syncthreads();
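/* Second push phase: using the relabel mask just updated above, active nodes repeat the same
   push operations (sink first, then the four neighbours) within the same kernel launch. */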
if (thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0)
{
int temp_weight = 0;
temp_weight = g_sink_weight[thid];
min_flow_pushed = flow_push;
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
g_sink_weight[thid] = temp_weight;
atomicSub(&g_push_reser[thid], min_flow_pushed);
}
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_left_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_left_weight[thid], min_flow_pushed);
atomicAdd(&g_right_weight[thid - 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - 1], min_flow_pushed);
}
else atomicSub(&g_pull_left[thid - 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_up_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult - 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_up_weight[thid], min_flow_pushed);
atomicAdd(&g_down_weight[thid - width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid - width1], min_flow_pushed);
}
else atomicSub(&g_pull_up[thid - width1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_right_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 1] + 1)
{
(temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_right_weight[thid], min_flow_pushed);
atomicAdd(&g_left_weight[thid + 1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + 1], min_flow_pushed);
}
else atomicSub(&g_pull_right[thid + 1], 1);
flow_push = g_push_reser[thid];
min_flow_pushed = flow_push;
temp_weight = g_down_weight[thid];
if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == height_fn[temp_mult + 34] + 1)
{
(temp_weight<flow_push) ? min_flow_pushed = temp_weight : 0;
temp_weight = temp_weight - min_flow_pushed;
atomicSub(&g_down_weight[thid], min_flow_pushed);
atomicAdd(&g_up_weight[thid + width1], min_flow_pushed);
atomicSub(&g_push_reser[thid], min_flow_pushed);
atomicAdd(&g_push_reser[thid + width1], min_flow_pushed);
}
else atomicSub(&g_pull_down[thid + width1], 1);
}
}
} |
3e788bb00fb75b169f260656a3e7ec7eb7a98b5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////using more than 8gb.
//typedef unsigned char byte;
void shuffle(long long int *array, long long int n)
{
if (n > 1){
long long int i;
for (i = 0; i < n - 1; i++){
long long int j = i + rand() / (RAND_MAX / (n - i) + 1);
long long int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
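/* shuffle() above is a Fisher-Yates shuffle; it is used below (random branch of init_cpu_data)
   to build a randomized pointer-chasing order. */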
void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){
if(0){////////////normal
for (unsigned i = 0; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
for (unsigned i = 7; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
A[size - stride]=0;
A[size - stride + 7]=0;
}
if(1){////////////reversed
for (unsigned i = 0; i <= size - stride; i = i + stride){
A[i]=(i - stride);
}
for (unsigned i = 7; i <= size - stride + 7; i = i + stride){
A[i]=(i - stride);
}
A[0]=size - stride;
A[7]=size - stride + 7;
}
if(0){////////////random
long long int *rand_sequence;
rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations);
//////random sequence offset 0
for(long long int i = 0; i < iterations; i++){
rand_sequence[i] = i;
}
//srand (time(NULL));
srand(1);
shuffle(rand_sequence, iterations);
long long int previous_rand_num;
long long int rand_num = rand_sequence[0] * stride;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning
//////random sequence offset 7
//for(int i = 0; i < iterations; i++){
// rand_sequence[i] = i;
//}
//srand (time(NULL));
//shuffle(rand_sequence, iterations);
rand_num = rand_sequence[0] * stride + 7;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride + 7;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning
}
/*
///////manually set the nodes
A[32]=104333344;
A[104333344]=200802336;
A[200802336]=353370144;
A[353370144]=372244512;
A[372244512]=110100512;
A[110100512]=182452256;
A[182452256]=333971488;
A[333971488]=225443872;
A[225443872]=155189280;
A[155189280]=104333344;
*/
}
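/* init_cpu_data links the array into a pointer-chasing chain at the given stride (plus a second
   chain at offset 7); only one of the normal / reversed / random orderings is enabled via the
   if(0)/if(1) switches above. */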
__device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//unsigned start_time = 0;//////clock
//unsigned end_time = 0;//////clock
//start_time = clock64();//////clock
for (long long int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//unsigned total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, unsigned *A, unsigned iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ unsigned s_index[1024 * 4];
//__shared__ int s_index[1];
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u32 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
".reg .u64 t7;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (unsigned it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
asm("shl.b32 t1, %3, 2;\n\t"
"cvt.u64.u32 t7, t1;\n\t"
"add.u64 t2, t7, %4;\n\t"
"shl.b32 t3, %6, 2;\n\t"
"add.u32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"st.shared.u32 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (unsigned it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
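/* P_chasing2 above times every dependent load of the chain: the inline PTX brackets each
   ld.global (and the st.shared of the visited index) with %clock64 reads, keeps the per-access
   latencies in shared memory, and finally copies indices and latencies back to the global arrays C and D. */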
__global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
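/* The kernel first walks the full chain once (P_chasing1) to warm the TLB and L2, then re-walks
   a reduced number of iterations (P_chasing2) while recording per-access latencies. */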
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(unsigned) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
unsigned counter = 0;
for(unsigned data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(unsigned mod2 = 1 * 16 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;//////when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb?
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, hipHostMallocDefault));//////////using pinned memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
//checkCudaErrors(hipFree(CPU_data_in));
checkCudaErrors(hipHostFree(CPU_data_in));//////using pinned memory
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2147483648;
if(mod > 3221225472){
mod = 3221225472;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, hipHostMallocDefault));//////////using pinned memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
//checkCudaErrors(hipFree(CPU_data_in));
checkCudaErrors(hipHostFree(CPU_data_in));//////using pinned memory
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2684354560;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, hipHostMallocDefault));//////////using pinned memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
//checkCudaErrors(hipFree(CPU_data_in));
checkCudaErrors(hipHostFree(CPU_data_in));//////using pinned memory
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| 3e788bb00fb75b169f260656a3e7ec7eb7a98b5d.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////using more than 8gb.
//typedef unsigned char byte;
void shuffle(long long int *array, long long int n)
{
if (n > 1){
long long int i;
for (i = 0; i < n - 1; i++){
long long int j = i + rand() / (RAND_MAX / (n - i) + 1);
long long int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){
if(0){////////////normal
for (unsigned i = 0; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
for (unsigned i = 7; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
A[size - stride]=0;
A[size - stride + 7]=0;
}
if(1){////////////reversed
for (unsigned i = 0; i <= size - stride; i = i + stride){
A[i]=(i - stride);
}
for (unsigned i = 7; i <= size - stride + 7; i = i + stride){
A[i]=(i - stride);
}
A[0]=size - stride;
A[7]=size - stride + 7;
}
if(0){////////////random
long long int *rand_sequence;
rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations);
//////random sequence offset 0
for(long long int i = 0; i < iterations; i++){
rand_sequence[i] = i;
}
//srand (time(NULL));
srand(1);
shuffle(rand_sequence, iterations);
long long int previous_rand_num;
long long int rand_num = rand_sequence[0] * stride;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning
//////random sequence offset 7
//for(int i = 0; i < iterations; i++){
// rand_sequence[i] = i;
//}
//srand (time(NULL));
//shuffle(rand_sequence, iterations);
rand_num = rand_sequence[0] * stride + 7;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride + 7;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning
}
/*
///////manually set the nodes
A[32]=104333344;
A[104333344]=200802336;
A[200802336]=353370144;
A[353370144]=372244512;
A[372244512]=110100512;
A[110100512]=182452256;
A[182452256]=333971488;
A[333971488]=225443872;
A[225443872]=155189280;
A[155189280]=104333344;
*/
}
__device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//unsigned start_time = 0;//////clock
//unsigned end_time = 0;//////clock
//start_time = clock64();//////clock
for (long long int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//unsigned total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, unsigned *A, unsigned iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ unsigned s_index[1024 * 4];
//__shared__ int s_index[1];
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u32 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
".reg .u64 t7;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (unsigned it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
asm("shl.b32 t1, %3, 2;\n\t"
"cvt.u64.u32 t7, t1;\n\t"
"add.u64 t2, t7, %4;\n\t"
"shl.b32 t3, %6, 2;\n\t"
"add.u32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"st.shared.u32 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (unsigned it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(unsigned) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
unsigned counter = 0;
for(unsigned data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(unsigned mod2 = 1 * 16 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;//////when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb?
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, cudaHostAllocDefault));//////////using pinned memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
//checkCudaErrors(cudaFree(CPU_data_in));
checkCudaErrors(cudaFreeHost(CPU_data_in));//////using pinned memory
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2147483648;
if(mod > 3221225472){
mod = 3221225472;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, cudaHostAllocDefault));//////////using pinned memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
//checkCudaErrors(cudaFree(CPU_data_in));
checkCudaErrors(cudaFreeHost(CPU_data_in));//////using pinned memory
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2684354560;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, cudaHostAllocDefault));//////////using pinned memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
//checkCudaErrors(cudaFree(CPU_data_in));
checkCudaErrors(cudaFreeHost(CPU_data_in));//////using pinned memory
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
c82d3595b0d09316d3653c1d1bed5e1cf02004ab.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "gputimer.h"
#include <time.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Utilities and system includes
//#include "helper_functions.h"
#define DEBUG
#define cudaCheckErrors() { \
hipError_t error=hipGetLastError(); \
if(error!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
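// Call cudaCheckErrors() right after kernel launches and CUDA API calls to
// abort with file/line information when the last recorded error is not success.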
//////////////////////////////////////////////////////////////////////////////////////
//Host Code
//////////////////////////////////////////////////////////////////////////////////////
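// Note: despite the "convolution" names, the host row pass below simply negates
// each element and the column pass transposes the matrix; the indexing throughout
// this file appears to assume a square input (dim_x == dim_y).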
void convolutionHostRow(double **input, double **output, int dim_x, int dim_y) {
for (int x= 0; x < dim_x; x++) {
for (int y=0 ; y < dim_y; y++) {
output[x][y] = -input[x][y];
}
}
}
void convolutionHostColumn(double **input, double **output, int dim_x, int dim_y) {
for (int x= 0; x < dim_x; x++) {
for (int y=0 ; y < dim_y; y++) {
output[x][y] = input[y][x];
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
//Device Code
//////////////////////////////////////////////////////////////////////////////////////
__global__ void
convolutionDeviceRow(double *input, double *output, int dim_x, int dim_y){
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x * blockDim.x + threadIdx.x;
int pos_x = x + y*dim_x;
output[pos_x] = -input[pos_x];
}
__global__ void
convolutionDeviceColumn(double *input, double *output, int dim_x, int dim_y){
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x * blockDim.x + threadIdx.x;
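// Caution: the flattened indices below use blockDim rather than dim_x/dim_y, so
// this transpose appears correct only when a single block covers the whole
// matrix (dimensions of at most 32).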
output[x + y*blockDim.x] = input[y + x*blockDim.y];
}
int main(int argc, char const *argv[]) {
double
**h_matrix ,
**h_buffer ,
**h_OutputCPU ,
*h_OutputGPU ,
*h_trans_matrix;
double
*d_matrix ,
*d_buffer ,
*d_output;
double overal_GPU_time = 0, overal_CPU_time = 0, overal_data_transfer_time = 0;
clock_t start, stop;
GpuTimer timer;
int dim_x = 1, dim_y = 1;
#ifdef DEBUG
int max_withd = 3;
#endif
if ( argc != 3) {
printf("Insuffisend arguments exiting\n");
exit(EXIT_FAILURE);
} else {
dim_x = atoi(argv[1]);
dim_y = atoi(argv[2]);
}
printf("Initializing host matricies...\n");
h_matrix = (double**)malloc(sizeof(double*)*dim_x);
if ( h_matrix == NULL){
fprintf(stderr, "Error in Host Matrix allocation\n");
exit(EXIT_FAILURE);
}
for ( int i = 0; i < dim_x; i++){
h_matrix[i] = (double*)malloc( sizeof(double) * dim_y);
if (h_matrix[i] == NULL){
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
for ( int j = 0; j < dim_y; j++) {
h_matrix[i][j] = j + (i * dim_x);
}
}
h_buffer = (double**)malloc(sizeof(double*)*dim_x);
if ( h_buffer == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
for ( int i = 0; i < dim_x; i++){
h_buffer[i] = (double*)malloc(sizeof(double) * dim_y);
if ( h_buffer[i] == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
}
h_OutputCPU = (double**)malloc(sizeof(double*)*dim_x);
if ( h_OutputCPU == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
for ( int i = 0; i < dim_x; i++){
h_OutputCPU[i] = (double*)malloc(sizeof(double) * dim_y);
if ( h_OutputCPU[i] == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
}
h_OutputGPU = (double*)malloc(sizeof(double)*dim_y*dim_x);
if ( h_OutputGPU == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
h_trans_matrix = (double*)malloc(sizeof(double)*dim_x*dim_y);
if (h_trans_matrix == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
printf("Initializing device matricies...\n");
d_matrix = NULL;
hipMalloc((void**)&d_matrix, sizeof(double)*dim_x*dim_y);
for ( int i =0 ; i < dim_x; i++) {
for ( int j = 0; j < dim_y; j++) {
h_trans_matrix[j + (i * dim_x)] = h_matrix[i][j];
}
}
d_buffer = NULL;
hipMalloc((void**)&d_buffer, sizeof(double)*dim_x*dim_y);
d_output = NULL;
hipMalloc((void**)&d_output, sizeof(double)*dim_x*dim_y);
printf("Transfering data to cuda Device...\n");
timer.Start();
hipMemcpy( d_matrix, h_trans_matrix, dim_x*dim_y*sizeof(double) , hipMemcpyHostToDevice);
timer.Stop();
overal_data_transfer_time += timer.Elapsed();
cudaCheckErrors();
printf("Running CPU code...\n");
start = clock();
convolutionHostRow(h_matrix, h_buffer, dim_x, dim_y);
convolutionHostColumn(h_buffer, h_OutputCPU, dim_x, dim_y);
stop = clock();
//kernel prep
printf("Running GPU code...\n");
int threadsPerBlock;
int blocksPerGrid;
if ( dim_x > 32 || dim_y > 32) {
threadsPerBlock = 32;
blocksPerGrid = dim_x/32;
} else {
threadsPerBlock = dim_x;
blocksPerGrid = 1;
}
dim3 threads(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksPerGrid, blocksPerGrid);
printf("CUDA kernel launch %d blocks of %d threads\n", grid.x*grid.y, threads.x*threads.y);
timer.Start();
hipLaunchKernelGGL(( convolutionDeviceRow), dim3(grid), dim3(threads), 0, 0, d_matrix, d_buffer, dim_x, dim_y);
timer.Stop();
overal_GPU_time += timer.Elapsed();
hipDeviceSynchronize();
cudaCheckErrors();
printf("CUDA kernel launch %d blocks of %d threads\n", grid.x*grid.y, threads.x*threads.y);
timer.Start();
hipLaunchKernelGGL(( convolutionDeviceColumn), dim3(grid), dim3(threads), 0, 0, d_buffer, d_output, dim_x, dim_y);
timer.Stop();
overal_GPU_time += timer.Elapsed();
hipDeviceSynchronize();
cudaCheckErrors();
timer.Start();
hipMemcpy( h_OutputGPU, d_output, sizeof(double)*dim_x*dim_y, hipMemcpyDeviceToHost);
timer.Stop();
overal_data_transfer_time += timer.Elapsed();
cudaCheckErrors();
printf("\nTime elapsed on GPU( computation) = %g ms\n", overal_GPU_time);
printf("Time elapsed on GPU( memory transfers) = %g ms\n", overal_data_transfer_time);
printf("\nTime elapsed on GPU( overal) = %g ms\n", overal_GPU_time + overal_data_transfer_time);
overal_CPU_time = (double)(stop - start) * 1000.0 / CLOCKS_PER_SEC ;
printf ("\nTime elapsed on CPU = %g ms\n", overal_CPU_time);
#ifdef DEBUG
printf("\nInput Matrix\n********************************************************************\n" );
for ( int i = 0; i < dim_x; i++){
for (int j = 0; j < dim_y; j++) {
printf(" %*g", max_withd, h_matrix[i][j]);
}
printf("\n");
}
printf("\nCPU Matrix\n********************************************************************\n" );
for ( int i = 0; i < dim_x; i++){
for (int j = 0; j < dim_y; j++) {
printf(" %*g", max_withd, h_OutputCPU[i][j]);
}
printf("\n");
}
printf("\nGPU Matrix\n********************************************************************\n" );
for ( int i = 0; i < dim_x; i++){
for (int j = 0; j < dim_y; j++) {
printf(" %*g", max_withd, h_OutputGPU[j + (i * dim_x)]);
}
printf("\n");
}
#endif
for ( int i = 0; i < dim_x; i++) {
free(h_OutputCPU[i]);
free(h_buffer[i]);
free(h_matrix[i]);
}
free(h_trans_matrix);
free(h_OutputGPU);
hipFree(d_output);
cudaCheckErrors();
hipFree(d_matrix);
cudaCheckErrors();
hipFree(d_buffer);
cudaCheckErrors();
hipDeviceReset();
cudaCheckErrors();
return 0;
}
| c82d3595b0d09316d3653c1d1bed5e1cf02004ab.cu | #include <stdio.h>
#include <stdlib.h>
#include "gputimer.h"
#include <time.h>
// CUDA runtime
#include <cuda_runtime.h>
// Utilities and system includes
//#include "helper_functions.h"
#define DEBUG
#define cudaCheckErrors() { \
cudaError_t error=cudaGetLastError(); \
if(error!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
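// Call cudaCheckErrors() right after kernel launches and CUDA API calls to
// abort with file/line information when the last recorded error is not success.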
//////////////////////////////////////////////////////////////////////////////////////
//Host Code
//////////////////////////////////////////////////////////////////////////////////////
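// Note: despite the "convolution" names, the host row pass below simply negates
// each element and the column pass transposes the matrix; the indexing throughout
// this file appears to assume a square input (dim_x == dim_y).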
void convolutionHostRow(double **input, double **output, int dim_x, int dim_y) {
for (int x= 0; x < dim_x; x++) {
for (int y=0 ; y < dim_y; y++) {
output[x][y] = -input[x][y];
}
}
}
void convolutionHostColumn(double **input, double **output, int dim_x, int dim_y) {
for (int x= 0; x < dim_x; x++) {
for (int y=0 ; y < dim_y; y++) {
output[x][y] = input[y][x];
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
//Device Code
//////////////////////////////////////////////////////////////////////////////////////
__global__ void
convolutionDeviceRow(double *input, double *output, int dim_x, int dim_y){
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x * blockDim.x + threadIdx.x;
int pos_x = x + y*dim_x;
output[pos_x] = -input[pos_x];
}
__global__ void
convolutionDeviceColumn(double *input, double *output, int dim_x, int dim_y){
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x * blockDim.x + threadIdx.x;
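// Caution: the flattened indices below use blockDim rather than dim_x/dim_y, so
// this transpose appears correct only when a single block covers the whole
// matrix (dimensions of at most 32).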
output[x + y*blockDim.x] = input[y + x*blockDim.y];
}
int main(int argc, char const *argv[]) {
double
**h_matrix ,
**h_buffer ,
**h_OutputCPU ,
*h_OutputGPU ,
*h_trans_matrix;
double
*d_matrix ,
*d_buffer ,
*d_output;
double overal_GPU_time = 0, overal_CPU_time = 0, overal_data_transfer_time = 0;
clock_t start, stop;
GpuTimer timer;
int dim_x = 1, dim_y = 1;
#ifdef DEBUG
int max_withd = 3;
#endif
if ( argc != 3) {
printf("Insuffisend arguments exiting\n");
exit(EXIT_FAILURE);
} else {
dim_x = atoi(argv[1]);
dim_y = atoi(argv[2]);
}
printf("Initializing host matricies...\n");
h_matrix = (double**)malloc(sizeof(double*)*dim_x);
if ( h_matrix == NULL){
fprintf(stderr, "Error in Host Matrix allocation\n");
exit(EXIT_FAILURE);
}
for ( int i = 0; i < dim_x; i++){
h_matrix[i] = (double*)malloc( sizeof(double) * dim_y);
if (h_matrix[i] == NULL){
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
for ( int j = 0; j < dim_y; j++) {
h_matrix[i][j] = j + (i * dim_x);
}
}
h_buffer = (double**)malloc(sizeof(double*)*dim_x);
if ( h_buffer == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
for ( int i = 0; i < dim_x; i++){
h_buffer[i] = (double*)malloc(sizeof(double) * dim_y);
if ( h_buffer[i] == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
}
h_OutputCPU = (double**)malloc(sizeof(double*)*dim_x);
if ( h_OutputCPU == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
for ( int i = 0; i < dim_x; i++){
h_OutputCPU[i] = (double*)malloc(sizeof(double) * dim_y);
if ( h_OutputCPU[i] == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
}
h_OutputGPU = (double*)malloc(sizeof(double)*dim_y*dim_x);
if ( h_OutputGPU == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
h_trans_matrix = (double*)malloc(sizeof(double)*dim_x*dim_y);
if (h_trans_matrix == NULL) {
fprintf(stderr, "Error in Host Matrix allocation\n" );
exit(EXIT_FAILURE);
}
printf("Initializing device matricies...\n");
d_matrix = NULL;
cudaMalloc((void**)&d_matrix, sizeof(double)*dim_x*dim_y);
for ( int i =0 ; i < dim_x; i++) {
for ( int j = 0; j < dim_y; j++) {
h_trans_matrix[j + (i * dim_x)] = h_matrix[i][j];
}
}
d_buffer = NULL;
cudaMalloc((void**)&d_buffer, sizeof(double)*dim_x*dim_y);
d_output = NULL;
cudaMalloc((void**)&d_output, sizeof(double)*dim_x*dim_y);
printf("Transfering data to cuda Device...\n");
timer.Start();
cudaMemcpy( d_matrix, h_trans_matrix, dim_x*dim_y*sizeof(double) , cudaMemcpyHostToDevice);
timer.Stop();
overal_data_transfer_time += timer.Elapsed();
cudaCheckErrors();
printf("Running CPU code...\n");
start = clock();
convolutionHostRow(h_matrix, h_buffer, dim_x, dim_y);
convolutionHostColumn(h_buffer, h_OutputCPU, dim_x, dim_y);
stop = clock();
//kernel prep
printf("Running GPU code...\n");
int threadsPerBlock;
int blocksPerGrid;
if ( dim_x > 32 || dim_y > 32) {
threadsPerBlock = 32;
blocksPerGrid = dim_x/32;
} else {
threadsPerBlock = dim_x;
blocksPerGrid = 1;
}
dim3 threads(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksPerGrid, blocksPerGrid);
printf("CUDA kernel launch %d blocks of %d threads\n", grid.x*grid.y, threads.x*threads.y);
timer.Start();
convolutionDeviceRow<<<grid, threads>>>( d_matrix, d_buffer, dim_x, dim_y);
timer.Stop();
overal_GPU_time += timer.Elapsed();
cudaDeviceSynchronize();
cudaCheckErrors();
printf("CUDA kernel launch %d blocks of %d threads\n", grid.x*grid.y, threads.x*threads.y);
timer.Start();
convolutionDeviceColumn<<<grid, threads>>>( d_buffer, d_output, dim_x, dim_y);
timer.Stop();
overal_GPU_time += timer.Elapsed();
cudaDeviceSynchronize();
cudaCheckErrors();
timer.Start();
cudaMemcpy( h_OutputGPU, d_output, sizeof(double)*dim_x*dim_y, cudaMemcpyDeviceToHost);
timer.Stop();
overal_data_transfer_time += timer.Elapsed();
cudaCheckErrors();
printf("\nTime elapsed on GPU( computation) = %g ms\n", overal_GPU_time);
printf("Time elapsed on GPU( memory transfers) = %g ms\n", overal_data_transfer_time);
printf("\nTime elapsed on GPU( overal) = %g ms\n", overal_GPU_time + overal_data_transfer_time);
overal_CPU_time = (double)(stop - start) * 1000.0 / CLOCKS_PER_SEC ;
printf ("\nTime elapsed on CPU = %g ms\n", overal_CPU_time);
#ifdef DEBUG
printf("\nInput Matrix\n********************************************************************\n" );
for ( int i = 0; i < dim_x; i++){
for (int j = 0; j < dim_y; j++) {
printf(" %*g", max_withd, h_matrix[i][j]);
}
printf("\n");
}
printf("\nCPU Matrix\n********************************************************************\n" );
for ( int i = 0; i < dim_x; i++){
for (int j = 0; j < dim_y; j++) {
printf(" %*g", max_withd, h_OutputCPU[i][j]);
}
printf("\n");
}
printf("\nGPU Matrix\n********************************************************************\n" );
for ( int i = 0; i < dim_x; i++){
for (int j = 0; j < dim_y; j++) {
printf(" %*g", max_withd, h_OutputGPU[j + (i * dim_x)]);
}
printf("\n");
}
#endif
for ( int i = 0; i < dim_x; i++) {
free(h_OutputCPU[i]);
free(h_buffer[i]);
free(h_matrix[i]);
}
free(h_trans_matrix);
free(h_OutputGPU);
cudaFree(d_output);
cudaCheckErrors();
cudaFree(d_matrix);
cudaCheckErrors();
cudaFree(d_buffer);
cudaCheckErrors();
cudaDeviceReset();
cudaCheckErrors();
return 0;
}
|
d3321beacc63d40fc8e5e2784098365044dd5c31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/range_op.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
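// Fills out[i] = start + i * step for i in [0, size); CUDA_KERNEL_LOOP is
// presumably the usual grid-stride loop macro, so one thread may handle
// several indices.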
template <typename T>
__global__ void RangeKernel(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T>
class CUDARangeKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* start_t = context.Input<framework::Tensor>("Start");
auto* end_t = context.Input<framework::Tensor>("End");
auto* step_t = context.Input<framework::Tensor>("Step");
auto* out = context.Output<framework::Tensor>("Out");
T start = GetValue<T>(start_t);
T end = GetValue<T>(end_t);
T step = GetValue<T>(step_t);
int64_t size = 0;
GetSize(start, end, step, &size);
out->Resize(framework::make_ddim({size}));
T* out_data = out->mutable_data<T>(context.GetPlace());
auto stream = context.cuda_device_context().stream();
int block = ::min(size, static_cast<int64_t>(256));
int grid = (size + block - 1) / block;
hipLaunchKernelGGL(( RangeKernel<T>), dim3(grid), dim3(block), 0, stream, start, step, size, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(range, ops::CUDARangeKernel<int>,
ops::CUDARangeKernel<int64_t>,
ops::CUDARangeKernel<float>,
ops::CUDARangeKernel<double>);
| d3321beacc63d40fc8e5e2784098365044dd5c31.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/range_op.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
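// Fills out[i] = start + i * step for i in [0, size); CUDA_KERNEL_LOOP is
// presumably the usual grid-stride loop macro, so one thread may handle
// several indices.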
template <typename T>
__global__ void RangeKernel(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T>
class CUDARangeKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* start_t = context.Input<framework::Tensor>("Start");
auto* end_t = context.Input<framework::Tensor>("End");
auto* step_t = context.Input<framework::Tensor>("Step");
auto* out = context.Output<framework::Tensor>("Out");
T start = GetValue<T>(start_t);
T end = GetValue<T>(end_t);
T step = GetValue<T>(step_t);
int64_t size = 0;
GetSize(start, end, step, &size);
out->Resize(framework::make_ddim({size}));
T* out_data = out->mutable_data<T>(context.GetPlace());
auto stream = context.cuda_device_context().stream();
int block = std::min(size, static_cast<int64_t>(256));
int grid = (size + block - 1) / block;
RangeKernel<T><<<grid, block, 0, stream>>>(start, step, size, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(range, ops::CUDARangeKernel<int>,
ops::CUDARangeKernel<int64_t>,
ops::CUDARangeKernel<float>,
ops::CUDARangeKernel<double>);
|
04ad3dd55f86b9fc5a42121cbe3119cb445a0918.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/klDivergence.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct klDivergenceParam {
int nElements;
double tolerance;
};
//test fixture class
template <typename DataT>
class klDivergenceTest : public ::testing::TestWithParam<klDivergenceParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<klDivergenceParam>::GetParam();
nElements = params.nElements;
//generating random value test input
std::vector<DataT> h_modelPDF(nElements, 0);
std::vector<DataT> h_candidatePDF(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_real_distribution<DataT> realGenerator(0.0, 1.0);
std::generate(h_modelPDF.begin(), h_modelPDF.end(),
[&]() { return realGenerator(dre); });
std::generate(h_candidatePDF.begin(), h_candidatePDF.end(),
[&]() { return realGenerator(dre); });
//allocating and initializing memory to the GPU
CUDA_CHECK(hipStreamCreate(&stream));
MLCommon::allocate(d_modelPDF, nElements, true);
MLCommon::allocate(d_candidatePDF, nElements, true);
MLCommon::updateDevice(d_modelPDF, &h_modelPDF[0], (int)nElements, stream);
MLCommon::updateDevice(d_candidatePDF, &h_candidatePDF[0], (int)nElements,
stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//generating the golden output
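//KL(P||Q) = sum_i P(i) * log(P(i) / Q(i)), treating the 0 * log(0) term as 0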
for (int i = 0; i < nElements; ++i) {
if (h_modelPDF[i] == 0.0)
truthklDivergence += 0;
else
truthklDivergence +=
h_modelPDF[i] * log(h_modelPDF[i] / h_candidatePDF[i]);
}
//calling the klDivergence CUDA implementation
computedklDivergence = MLCommon::Metrics::klDivergence(
d_modelPDF, d_candidatePDF, nElements, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(hipFree(d_modelPDF));
CUDA_CHECK(hipFree(d_candidatePDF));
CUDA_CHECK(hipStreamDestroy(stream));
}
//declaring the data values
klDivergenceParam params;
DataT* d_modelPDF = nullptr;
DataT* d_candidatePDF = nullptr;
int nElements = 0;
DataT truthklDivergence = 0;
DataT computedklDivergence = 0;
hipStream_t stream;
};
//setting test parameter values
const std::vector<klDivergenceParam> inputs = {
{500, 0.000001}, {200, 0.001}, {5000, 0.000001}, {500000, 0.000001}
};
//writing the test suite
typedef klDivergenceTest<double> klDivergenceTestClass;
TEST_P(klDivergenceTestClass, Result) {
ASSERT_NEAR(computedklDivergence, truthklDivergence, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(klDivergence, klDivergenceTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
| 04ad3dd55f86b9fc5a42121cbe3119cb445a0918.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/klDivergence.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct klDivergenceParam {
int nElements;
double tolerance;
};
//test fixture class
template <typename DataT>
class klDivergenceTest : public ::testing::TestWithParam<klDivergenceParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<klDivergenceParam>::GetParam();
nElements = params.nElements;
//generating random value test input
std::vector<DataT> h_modelPDF(nElements, 0);
std::vector<DataT> h_candidatePDF(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_real_distribution<DataT> realGenerator(0.0, 1.0);
std::generate(h_modelPDF.begin(), h_modelPDF.end(),
[&]() { return realGenerator(dre); });
std::generate(h_candidatePDF.begin(), h_candidatePDF.end(),
[&]() { return realGenerator(dre); });
//allocating and initializing memory to the GPU
CUDA_CHECK(cudaStreamCreate(&stream));
MLCommon::allocate(d_modelPDF, nElements, true);
MLCommon::allocate(d_candidatePDF, nElements, true);
MLCommon::updateDevice(d_modelPDF, &h_modelPDF[0], (int)nElements, stream);
MLCommon::updateDevice(d_candidatePDF, &h_candidatePDF[0], (int)nElements,
stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//generating the golden output
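//KL(P||Q) = sum_i P(i) * log(P(i) / Q(i)), treating the 0 * log(0) term as 0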
for (int i = 0; i < nElements; ++i) {
if (h_modelPDF[i] == 0.0)
truthklDivergence += 0;
else
truthklDivergence +=
h_modelPDF[i] * log(h_modelPDF[i] / h_candidatePDF[i]);
}
//calling the klDivergence CUDA implementation
computedklDivergence = MLCommon::Metrics::klDivergence(
d_modelPDF, d_candidatePDF, nElements, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(cudaFree(d_modelPDF));
CUDA_CHECK(cudaFree(d_candidatePDF));
CUDA_CHECK(cudaStreamDestroy(stream));
}
//declaring the data values
klDivergenceParam params;
DataT* d_modelPDF = nullptr;
DataT* d_candidatePDF = nullptr;
int nElements = 0;
DataT truthklDivergence = 0;
DataT computedklDivergence = 0;
cudaStream_t stream;
};
//setting test parameter values
const std::vector<klDivergenceParam> inputs = {
{500, 0.000001}, {200, 0.001}, {5000, 0.000001}, {500000, 0.000001}
};
//writing the test suite
typedef klDivergenceTest<double> klDivergenceTestClass;
TEST_P(klDivergenceTestClass, Result) {
ASSERT_NEAR(computedklDivergence, truthklDivergence, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(klDivergence, klDivergenceTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
be1f31758ac6b6eae7a1294c1a697462b49cabeb.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_shuffledPos;
glm::vec3 *dev_shuffledVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
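// Bit-mixing integer hash used to derive a pseudo-random seed from a boid index.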
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
hipDeviceSynchronize();
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
// 2.3 Additional buffers.
hipMalloc((void**)&dev_shuffledPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffledPos failed!");
hipMalloc((void**)&dev_shuffledVel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffledVel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 boidPos = pos[iSelf];
glm::vec3 perceivedCenter = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 percVel = glm::vec3(0.0f);
int rule1Counter = 0;
int rule3Counter = 0;
for (int i = 0; i < N; i++) {
float distance = glm::distance(pos[i], boidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && distance < rule1Distance) {
perceivedCenter += pos[i];
rule1Counter++;
}
// Rule 2
if (i != iSelf && distance < rule2Distance) {
c -= (pos[i] - boidPos);
}
// Rule 3
if (i != iSelf && distance < rule3Distance) {
percVel += vel[i];
rule3Counter++;
}
}
if (rule1Counter != 0) {
perceivedCenter /= rule1Counter;
perceivedCenter = perceivedCenter - boidPos;
}
if (rule3Counter != 0) {
percVel /= rule3Counter;
}
return perceivedCenter*rule1Scale + c * rule2Scale + percVel * rule3Scale;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
vel2[index] = vel1[index] + computeVelocityChange(N, index, pos, vel1);
if (glm::length(vel2[index]) > maxSpeed) {
vel2[index] = glm::normalize(vel2[index]) * maxSpeed;
}
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
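// Returns +1 or -1 depending on which side of the cell's center a boid lies on
// along one axis; used below to pick the 8 neighboring cells (out of 27) that
// can contain neighbors, since each cell is twice the largest rule distance wide.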
__device__ int getGridCellQuadrant(float gridCellPos, float origin) {
if (gridCellPos > origin) {
return 1;
}
else {
return -1;
}
}
// Added 9/9/17
__device__ void checkGridCellAndUpdateVel(int x, int y, int z, int gridResolution,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, int iSelf) {
if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) {
return;
}
else {
int gridIndex = gridIndex3Dto1D(x, y, z, gridResolution);
int start = gridCellStartIndices[gridIndex];
int end = gridCellEndIndices[gridIndex];
if (start == -1 || end == -1) {
return;
}
// TODO(Wenli): Repetitive code, consider refactoring.
glm::vec3 boidPos = pos[iSelf];
glm::vec3 perceivedCenter = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 percVel = glm::vec3(0.0f);
int rule1Counter = 0;
int rule3Counter = 0;
for (int j = start; j <= end; j++) {
// Bug was here -_-
int i = particleArrayIndices[j];
float distance = glm::distance(pos[i], boidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && distance < rule1Distance) {
perceivedCenter += pos[i];
rule1Counter++;
}
// Rule 2
if (i != iSelf && distance < rule2Distance) {
c -= (pos[i] - boidPos);
}
// Rule 3
if (i != iSelf && distance < rule3Distance) {
percVel += vel1[i];
rule3Counter++;
}
}
if (rule1Counter != 0) {
perceivedCenter /= rule1Counter;
perceivedCenter = perceivedCenter - boidPos;
}
if (rule3Counter != 0) {
percVel /= rule3Counter;
}
vel2[iSelf] += perceivedCenter*rule1Scale + c * rule2Scale + percVel * rule3Scale;
}
}
__device__ void checkGridCellAndUpdateVelCoherent(int x, int y, int z, int gridResolution,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, int iSelf) {
if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) {
return;
}
else {
int gridIndex = gridIndex3Dto1D(x, y, z, gridResolution);
int start = gridCellStartIndices[gridIndex];
int end = gridCellEndIndices[gridIndex];
if (start == -1 || end == -1) {
return;
}
// TODO(Wenli): Repetitive code, consider refactoring.
glm::vec3 boidPos = pos[iSelf];
glm::vec3 perceivedCenter = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 percVel = glm::vec3(0.0f);
int rule1Counter = 0;
int rule3Counter = 0;
for (int i = start; i <= end; i++) {
float distance = glm::distance(pos[i], boidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && distance < rule1Distance) {
perceivedCenter += pos[i];
rule1Counter++;
}
// Rule 2
if (i != iSelf && distance < rule2Distance) {
c -= (pos[i] - boidPos);
}
// Rule 3
if (i != iSelf && distance < rule3Distance) {
percVel += vel1[i];
rule3Counter++;
}
}
if (rule1Counter != 0) {
perceivedCenter /= rule1Counter;
perceivedCenter = perceivedCenter - boidPos;
}
if (rule3Counter != 0) {
percVel /= rule3Counter;
}
vel2[iSelf] += perceivedCenter*rule1Scale + c * rule2Scale + percVel * rule3Scale;
}
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int x = glm::floor((pos[index].x - gridMin.x)*inverseCellWidth);
int y = glm::floor((pos[index].y - gridMin.y)*inverseCellWidth);
int z = glm::floor((pos[index].z - gridMin.z)*inverseCellWidth);
gridIndices[index] = gridIndex3Dto1D(x, y, z, gridResolution);
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// wz: start end buffers, set to -1?
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int gridIdx = particleGridIndices[index];
  if (index == 0) {
    gridCellStartIndices[gridIdx] = index;
  } else {
    int prevIdx = particleGridIndices[index - 1];
    if (prevIdx != gridIdx) {
      gridCellStartIndices[gridIdx] = index;
      gridCellEndIndices[prevIdx] = index - 1;
    }
  }
  // The last boid in the sorted order closes out its cell's end index.
  if (index == N - 1) {
    gridCellEndIndices[gridIdx] = index;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// get particle pos
glm::vec3 partPos = pos[index];
int x = glm::floor((partPos.x - gridMin.x)*inverseCellWidth);
int y = glm::floor((partPos.y - gridMin.y)*inverseCellWidth);
int z = glm::floor((partPos.z - gridMin.z)*inverseCellWidth);
// identify grid cell
int gridCell = gridIndex3Dto1D(x, y, z, gridResolution);
// calculate grid positions of up to eight neighbors
//glm::vec3 center = glm::vec3(x + 0.5, y + 0.5, z + 0.5);
// qX, qY, qZ are -1 or 1 depending on what quadrant of the cell the boid is in
glm::vec3 floatPos = partPos - gridMin;
glm::vec3 center = glm::vec3(x*cellWidth + cellWidth / 2, y*cellWidth + cellWidth / 2, z *cellWidth + cellWidth / 2);
glm::vec3 quadVec = floatPos - center;
int qX = getGridCellQuadrant(quadVec.x, 0);
int qY = getGridCellQuadrant(quadVec.y, 0);
int qZ = getGridCellQuadrant(quadVec.z, 0);
// Set vel2.
vel2[index] = vel1[index];
// check grid cells at x (+ qX), y (+ qY), z (+ qZ)
// add to Vel2
checkGridCellAndUpdateVel(x, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
/*
// USED FOR TESTING CHECKING 27 GRID CELLS
for (int i = -1; i < 2; i++) {
for (int j = -1; j < 2; j++) {
for (int k = -1; k < 2; k++) {
checkGridCellAndUpdateVel(x + i, y + j, z + k, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
}
}
}
*/
if (glm::length(vel2[index]) > maxSpeed) {
vel2[index] = glm::normalize(vel2[index]) * maxSpeed;
}
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// get particle pos
glm::vec3 partPos = pos[index];
int x = glm::floor((partPos.x - gridMin.x)*inverseCellWidth);
int y = glm::floor((partPos.y - gridMin.y)*inverseCellWidth);
int z = glm::floor((partPos.z - gridMin.z)*inverseCellWidth);
// identify grid cell
int gridCell = gridIndex3Dto1D(x, y, z, gridResolution);
// calculate grid positions of up to eight neighbors
glm::vec3 floatPos = partPos - gridMin;
glm::vec3 center = glm::vec3(x*cellWidth + cellWidth / 2, y*cellWidth + cellWidth / 2, z *cellWidth + cellWidth / 2);
glm::vec3 quadVec = floatPos - center;
// qX, qY, qZ are -1 or 1 depending on what quadrant of the cell the boid is in
int qX = getGridCellQuadrant(quadVec.x, 0);
int qY = getGridCellQuadrant(quadVec.y, 0);
int qZ = getGridCellQuadrant(quadVec.z, 0);
// Set vel2.
vel2[index] = vel1[index];
// check grid cells at x (+ qX), y (+ qY), z (+ qZ)
// add to Vel2
checkGridCellAndUpdateVelCoherent(x, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
if (glm::length(vel2[index]) > maxSpeed) {
vel2[index] = glm::normalize(vel2[index]) * maxSpeed;
}
}
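// Reorders the position and velocity buffers into cell-coherent order: entry i
// of the shuffled arrays holds the boid whose original index is
// particleArrayIndices[i], so boids in the same grid cell become contiguous.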
__global__ void kernShufflePositionAndVelocity(int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *shuffledPos, glm::vec3 *vel1, glm::vec3 *shuffledVel) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int shuffledIdx = particleArrayIndices[index];
shuffledPos[index] = pos[shuffledIdx];
shuffledVel[index] = vel1[shuffledIdx];
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Label indices
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
// Sort using thrust.
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
// Set all the dev_gridCellStartIndices and EndIndices to -1
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
// Find start and end indices.
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >
(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Velocity updates.
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
// Update position.
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// Ping pong.
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Label indices
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
// Sort using thrust.
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
// Set all the dev_gridCellStartIndices and EndIndices to -1
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
// Find start and end indices.
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >
(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Rearrange positions/velocity
kernShufflePositionAndVelocity << <fullBlocksPerGrid, blockSize >> >
(numObjects, dev_particleArrayIndices, dev_pos, dev_shuffledPos, dev_vel1, dev_shuffledVel);
// Velocity updates.
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_shuffledPos, dev_shuffledVel, dev_vel2);
// Update position with shuffledPos.
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_shuffledPos, dev_vel2);
// Ping pong.
glm::vec3 *temp1 = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp1;
//glm::vec3 *temp2 = dev_vel2;
//dev_vel2 = dev_shuffledVel;
//dev_shuffledVel = temp2;
//dev_vel2 = dev_shuffledVel;
glm::vec3 *temp = dev_pos;
dev_pos = dev_shuffledPos;
dev_shuffledPos = temp;
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_shuffledVel);
hipFree(dev_shuffledPos);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| be1f31758ac6b6eae7a1294c1a697462b49cabeb.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_shuffledPos;
glm::vec3 *dev_shuffledVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
  cudaDeviceSynchronize();
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
// 2.3 Additional buffers.
cudaMalloc((void**)&dev_shuffledPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffledPos failed!");
cudaMalloc((void**)&dev_shuffledVel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffledVel failed!");
  cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 boidPos = pos[iSelf];
glm::vec3 perceivedCenter = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
  glm::vec3 percVel = glm::vec3(0.0f);
int rule1Counter = 0;
int rule3Counter = 0;
for (int i = 0; i < N; i++) {
float distance = glm::distance(pos[i], boidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && distance < rule1Distance) {
perceivedCenter += pos[i];
rule1Counter++;
}
// Rule 2
if (i != iSelf && distance < rule2Distance) {
c -= (pos[i] - boidPos);
}
// Rule 3
if (i != iSelf && distance < rule3Distance) {
percVel += vel[i];
rule3Counter++;
}
}
if (rule1Counter != 0) {
perceivedCenter /= rule1Counter;
perceivedCenter = perceivedCenter - boidPos;
}
if (rule3Counter != 0) {
percVel /= rule3Counter;
}
return perceivedCenter*rule1Scale + c * rule2Scale + percVel * rule3Scale;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
vel2[index] = vel1[index] + computeVelocityChange(N, index, pos, vel1);
if (glm::length(vel2[index]) > maxSpeed) {
vel2[index] = glm::normalize(vel2[index]) * maxSpeed;
}
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
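// Note on the question above: x is the fastest-varying dimension of this flattened
// index (consecutive x values map to consecutive ints), so a neighbor scan of the form
//   for (z) { for (y) { for (x) { cell = gridIndex3Dto1D(x, y, z, res); ... } } }
// with x innermost walks the cell start/end arrays contiguously.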
__device__ int getGridCellQuadrant(float gridCellPos, float origin) {
if (gridCellPos > origin) {
return 1;
}
else {
return -1;
}
}
// Added 9/9/17
__device__ void checkGridCellAndUpdateVel(int x, int y, int z, int gridResolution,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, int iSelf) {
  if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) {
return;
}
else {
int gridIndex = gridIndex3Dto1D(x, y, z, gridResolution);
int start = gridCellStartIndices[gridIndex];
int end = gridCellEndIndices[gridIndex];
if (start == -1 || end == -1) {
return;
}
    // TODO(Wenli): Repetitive code, consider refactoring.
glm::vec3 boidPos = pos[iSelf];
glm::vec3 perceivedCenter = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
    glm::vec3 percVel = glm::vec3(0.0f);
int rule1Counter = 0;
int rule3Counter = 0;
for (int j = start; j <= end; j++) {
// Bug was here -_-
int i = particleArrayIndices[j];
float distance = glm::distance(pos[i], boidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && distance < rule1Distance) {
perceivedCenter += pos[i];
rule1Counter++;
}
// Rule 2
if (i != iSelf && distance < rule2Distance) {
c -= (pos[i] - boidPos);
}
// Rule 3
if (i != iSelf && distance < rule3Distance) {
percVel += vel1[i];
rule3Counter++;
}
}
if (rule1Counter != 0) {
perceivedCenter /= rule1Counter;
perceivedCenter = perceivedCenter - boidPos;
}
if (rule3Counter != 0) {
percVel /= rule3Counter;
}
vel2[iSelf] += perceivedCenter*rule1Scale + c * rule2Scale + percVel * rule3Scale;
}
}
__device__ void checkGridCellAndUpdateVelCoherent(int x, int y, int z, int gridResolution,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2, int iSelf) {
  if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) {
return;
}
else {
int gridIndex = gridIndex3Dto1D(x, y, z, gridResolution);
int start = gridCellStartIndices[gridIndex];
int end = gridCellEndIndices[gridIndex];
if (start == -1 || end == -1) {
return;
}
    // TODO(Wenli): Repetitive code, consider refactoring.
glm::vec3 boidPos = pos[iSelf];
glm::vec3 perceivedCenter = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
    glm::vec3 percVel = glm::vec3(0.0f);
int rule1Counter = 0;
int rule3Counter = 0;
for (int i = start; i <= end; i++) {
float distance = glm::distance(pos[i], boidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && distance < rule1Distance) {
perceivedCenter += pos[i];
rule1Counter++;
}
// Rule 2
if (i != iSelf && distance < rule2Distance) {
c -= (pos[i] - boidPos);
}
// Rule 3
if (i != iSelf && distance < rule3Distance) {
percVel += vel1[i];
rule3Counter++;
}
}
if (rule1Counter != 0) {
perceivedCenter /= rule1Counter;
perceivedCenter = perceivedCenter - boidPos;
}
if (rule3Counter != 0) {
percVel /= rule3Counter;
}
vel2[iSelf] += perceivedCenter*rule1Scale + c * rule2Scale + percVel * rule3Scale;
}
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
return;
}
int x = glm::floor((pos[index].x - gridMin.x)*inverseCellWidth);
int y = glm::floor((pos[index].y - gridMin.y)*inverseCellWidth);
int z = glm::floor((pos[index].z - gridMin.z)*inverseCellWidth);
gridIndices[index] = gridIndex3Dto1D(x, y, z, gridResolution);
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// wz: start end buffers, set to -1?
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int gridIdx = particleGridIndices[index];
  if (index == 0) {
    gridCellStartIndices[gridIdx] = index;
  } else {
    int prevIdx = particleGridIndices[index - 1];
    if (prevIdx != gridIdx) {
      gridCellStartIndices[gridIdx] = index;
      gridCellEndIndices[prevIdx] = index - 1;
    }
  }
  // The last sorted entry also closes the final occupied cell.
  if (index == N - 1) {
    gridCellEndIndices[gridIdx] = index;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
return;
}
// get particle pos
glm::vec3 partPos = pos[index];
int x = glm::floor((partPos.x - gridMin.x)*inverseCellWidth);
int y = glm::floor((partPos.y - gridMin.y)*inverseCellWidth);
int z = glm::floor((partPos.z - gridMin.z)*inverseCellWidth);
// identify grid cell
int gridCell = gridIndex3Dto1D(x, y, z, gridResolution);
// calculate grid positions of up to eight neighbors
//glm::vec3 center = glm::vec3(x + 0.5, y + 0.5, z + 0.5);
// qX, qY, qZ are -1 or 1 depending on what quadrant of the cell the boid is in
glm::vec3 floatPos = partPos - gridMin;
glm::vec3 center = glm::vec3(x*cellWidth + cellWidth / 2, y*cellWidth + cellWidth / 2, z *cellWidth + cellWidth / 2);
glm::vec3 quadVec = floatPos - center;
int qX = getGridCellQuadrant(quadVec.x, 0);
int qY = getGridCellQuadrant(quadVec.y, 0);
int qZ = getGridCellQuadrant(quadVec.z, 0);
// Set vel2.
vel2[index] = vel1[index];
// check grid cells at x (+ qX), y (+ qY), z (+ qZ)
// add to Vel2
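  // The cell width is twice the largest rule distance (see initSimulation), so any
  // neighbor within range lies either in this cell or in the adjacent cell on the
  // boid's side of the cell center along each axis, i.e. in this 2x2x2 block of cells.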
checkGridCellAndUpdateVel(x, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVel(x + qX, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
/*
// USED FOR TESTING CHECKING 27 GRID CELLS
for (int i = -1; i < 2; i++) {
for (int j = -1; j < 2; j++) {
for (int k = -1; k < 2; k++) {
checkGridCellAndUpdateVel(x + i, y + j, z + k, gridResolution, gridCellStartIndices, gridCellEndIndices, particleArrayIndices, pos, vel1, vel2, index);
}
}
}
*/
if (glm::length(vel2[index]) > maxSpeed) {
vel2[index] = glm::normalize(vel2[index]) * maxSpeed;
}
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
return;
}
// get particle pos
glm::vec3 partPos = pos[index];
int x = glm::floor((partPos.x - gridMin.x)*inverseCellWidth);
int y = glm::floor((partPos.y - gridMin.y)*inverseCellWidth);
int z = glm::floor((partPos.z - gridMin.z)*inverseCellWidth);
// identify grid cell
int gridCell = gridIndex3Dto1D(x, y, z, gridResolution);
// calculate grid positions of up to eight neighbors
glm::vec3 floatPos = partPos - gridMin;
glm::vec3 center = glm::vec3(x*cellWidth + cellWidth / 2, y*cellWidth + cellWidth / 2, z *cellWidth + cellWidth / 2);
glm::vec3 quadVec = floatPos - center;
// qX, qY, qZ are -1 or 1 depending on what quadrant of the cell the boid is in
int qX = getGridCellQuadrant(quadVec.x, 0);
int qY = getGridCellQuadrant(quadVec.y, 0);
int qZ = getGridCellQuadrant(quadVec.z, 0);
// Set vel2.
vel2[index] = vel1[index];
// check grid cells at x (+ qX), y (+ qY), z (+ qZ)
// add to Vel2
checkGridCellAndUpdateVelCoherent(x, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y + qY, z, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
checkGridCellAndUpdateVelCoherent(x + qX, y + qY, z + qZ, gridResolution, gridCellStartIndices, gridCellEndIndices, pos, vel1, vel2, index);
if (glm::length(vel2[index]) > maxSpeed) {
vel2[index] = glm::normalize(vel2[index]) * maxSpeed;
}
}
__global__ void kernShufflePositionAndVelocity(int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *shuffledPos, glm::vec3 *vel1, glm::vec3 *shuffledVel) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int shuffledIdx = particleArrayIndices[index];
shuffledPos[index] = pos[shuffledIdx];
shuffledVel[index] = vel1[shuffledIdx];
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Label indices
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
// Sort using thrust.
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
// Set all the dev_gridCellStartIndices and EndIndices to -1
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
// Find start and end indices.
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >
(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Velocity updates.
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
// Update position.
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// Ping pong.
glm::vec3 *temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Label indices
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
// Sort using thrust.
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
// Set all the dev_gridCellStartIndices and EndIndices to -1
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <cellBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
// Find start and end indices.
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >
(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Rearrange positions/velocity
kernShufflePositionAndVelocity << <fullBlocksPerGrid, blockSize >> >
(numObjects, dev_particleArrayIndices, dev_pos, dev_shuffledPos, dev_vel1, dev_shuffledVel);
// Velocity updates.
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> >
(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_shuffledPos, dev_shuffledVel, dev_vel2);
// Update position with shuffledPos.
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_shuffledPos, dev_vel2);
// Ping pong.
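  // After the coherent pass, dev_shuffledPos holds the updated positions and
  // dev_vel2 the updated velocities, both in grid-sorted order, so swapping
  // pos<->shuffledPos and vel1<->vel2 keeps dev_pos and dev_vel1 as parallel arrays.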
glm::vec3 *temp1 = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp1;
//glm::vec3 *temp2 = dev_vel2;
//dev_vel2 = dev_shuffledVel;
//dev_shuffledVel = temp2;
glm::vec3 *temp = dev_pos;
dev_pos = dev_shuffledPos;
dev_shuffledPos = temp;
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_shuffledVel);
cudaFree(dev_shuffledPos);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
a02d7ef49d7af648867e9757c1997e3372eddec7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "helper.cu.h"
#include "kernels-optim.cu.h"
#include "sequential.cu.h"
#define BLOCK_SIZE 1024//1024 //1024//2048
#define WIDTH_A 1024//1024 //1024//2048
#define HEIGHT_A 1//2048//2048//2048
#define WIDTH_B 1024//4096//2048
#define TILE_HEIGHT 1
#define TILE_WIDTH 1024
#define F32_MIN -FLT_MAX
#define I32_MIN -2147483648
typedef unsigned int uint;
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
//// Helpers
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) {
unsigned int resolution=1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) - (t1->tv_usec + resolution * t1->tv_sec);
result->tv_sec = diff / resolution;
result->tv_usec = diff % resolution;
return (diff<0);
}
void randomInit(float* data, int size) {
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
template<class T>
void matMult(T* A, T* B, T* C, int colsA, int rowsA, int colsB) {
for(int i = 0; i < rowsA; i++) {
for(int j = 0; j < colsB; j++) {
float sum = 0.0;
for(int k = 0; k < colsA; k++) {
sum += A[i*colsA + k] * B[k * colsB + j];
}
C[i * colsB + j] = sum;
}
}
}
template<class T>
bool validate(float* A,float* B, unsigned int sizeAB){
for(int i = 0; i < sizeAB; i++)
if (fabs(A[i] - B[i]) > 0.0005){
printf("INVALID RESULT %d %f %f\n", i, A[i], B[i]);
return false;
}
printf("VALID RESULT!\n");
return true;
}
int gpuAssert(hipError_t code) {
if(code != hipSuccess) {
printf("GPU Error: %s\n", hipGetErrorString(code));
return -1;
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//// PROGRAM MAIN
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char const *argv[]) {
if (argc != 2) {
printf("Please include the name of the dataset.\n");
return -1;
}
///////////////////////////////////////////////////////////////////////////////
//// PARSING
///////////////////////////////////////////////////////////////////////////////
FILE *fp, *fpim;
if (argv[1][0] == 's') {
fp = fopen("../data/saharaC.in", "r");
fpim = fopen("../data/saharaCimages.in", "r");
} else {
fp = fopen("../data/peruC.in", "r");
fpim = fopen("../data/peruCimages.in", "r");
}
if (fp == NULL || fpim == NULL) {
printf("Files not read.\n");
return -1;
}
char input1[10], input2[10], input3[30], input4[30];
char input5[30], input6[30], input7[50], input8[30];
fscanf(fp, " %[^\n] %[^\n] %[^\n] %[^\n] ", input1,input2,input3,input4);
fscanf(fp, " %[^\n] %[^\n] %[^\n] %[^\n] ", input5,input6,input7,input8);
int k = atoi(input2);
uint n = (uint)atoi(input3);
uint N = (uint)atoi(input8);
uint mIRL = (uint)atoi(input7);
int trend = atoi(input1);
float freq = atof(input4);
float hfrac = atof(input5);
float lam = atof(input6);
uint m = 2;
int K = 2*k + 2;
  int mappingLen = 0, imageLen = 0, i = 0;
// getting the lengths of mappingindices and images
while (getc(fp) != EOF) { mappingLen++; }
while (getc(fpim) != EOF) { imageLen++; }
// rewinding the pointer to extract the data
rewind(fpim);
// extracting each array
char mappings[mappingLen], pixels[(imageLen-mappingLen)];
fscanf(fpim, " %[^\n] %[^\n] ", mappings, pixels);
// converting mappingindices from char* to int*
char delim[] = ",";
char *mapPtr = strtok(mappings, delim);
// allocating host memory for mappingindices and pixels
int* h_mappingindices = (int*) calloc(N,sizeof(int));
float* h_sample = (float*) calloc(N*m,sizeof(float));
// inserting data to mappingindices
while(mapPtr != NULL) {
h_mappingindices[i] = atoi(mapPtr);
i++;
mapPtr = strtok(NULL, delim);
}
// converting samples from char* to float*
char *pixelsPtr = strtok(pixels, delim);
i = 0;
// inserting data to sample
while(pixelsPtr != NULL) {
h_sample[i] = atof(pixelsPtr);
i++;
pixelsPtr = strtok(NULL, delim);
}
// closing file with data
fclose(fp);
// opening file for validation of results
FILE* fpV = fopen("../data/val.data","a+");
// allocate device memory
uint map_size = N*sizeof(int);
uint sam_size = N*m*sizeof(float);
int* d_mappingindices;
float* d_sample;
hipMalloc((void**) &d_mappingindices, map_size);
hipMalloc((void**) &d_sample, sam_size);
// copy host memory to device
hipMemcpy(d_mappingindices, h_mappingindices, map_size, hipMemcpyHostToDevice);
hipMemcpy(d_sample, h_sample, sam_size, hipMemcpyHostToDevice);
uint X_size = K*N*sizeof(float);
uint Xsqr_size = K*K*m*sizeof(float);
uint Xinv_size = K*K*m*sizeof(float);
uint B0_size = K*m*sizeof(float);
// allocate host memory for X
float* h_X = (float*) calloc(N*K,sizeof(float));
float* h_XT = (float*) calloc(K*N,sizeof(float));
float* h_Xsqr = (float*) calloc(K*K*m,sizeof(float));
float* h_Xinv = (float*) calloc(K*K*m,sizeof(float));
float* h_B0 = (float*) calloc(K*m,sizeof(float));
// allocate device memory for X, XT and Xsqr
float *d_X, *d_XT, *d_Xsqr, *d_Xinv, *d_B0;
hipMalloc((void**) &d_X, X_size);
hipMalloc((void**) &d_XT, X_size);
hipMalloc((void**) &d_Xsqr, Xsqr_size);
hipMalloc((void**) &d_Xinv, Xinv_size);
hipMalloc((void**) &d_B0, B0_size);
/////////////////////////////////////////////////////////////////////////
//// KERNEL 1
/////////////////////////////////////////////////////////////////////////
{
dim3 block(1024, 1, 1);
dim3 grid (1024, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 1
hipLaunchKernelGGL(( ker1) , dim3(grid), dim3(block) , 0, 0, N, K, freq, d_mappingindices, d_X, d_XT);
hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
hipMemcpy(h_X, d_X, X_size, hipMemcpyDeviceToHost);
hipMemcpy(h_XT, d_XT, X_size, hipMemcpyDeviceToHost);
// add to validation
printX(fpV, h_X, K, N);
printf("GPU Optimized Kernel 1 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
// double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 1 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 2
/////////////////////////////////////////////////////////////////////////
{
dim3 block(K, K, 1);
dim3 grid (m, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 2
hipLaunchKernelGGL(( ker2) , dim3(grid), dim3(block) , 0, 0, n, N, m, d_X, d_XT, d_sample, d_Xsqr, K);
hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
hipMemcpy(h_Xsqr, d_Xsqr, Xsqr_size, hipMemcpyDeviceToHost);
// validation
printM(fpV, h_Xsqr, m, K);
printf("GPU Optimized Kernel 2 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
// double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 2 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 3
/////////////////////////////////////////////////////////////////////////
{
dim3 block(2*K, K, 1);
dim3 grid (n, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 3
hipLaunchKernelGGL(( ker3), dim3(grid), dim3(block), 4*K*K*sizeof(float) , 0, m, K, d_Xsqr, d_Xinv);
// hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
hipMemcpy(h_Xinv, d_Xinv, Xinv_size, hipMemcpyDeviceToHost);
printM(fpV, h_Xinv, m, K);
// printM(fpV, h_Xsqr, m, K);
printf("GPU Optimized Kernel 3 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 3 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 4
/////////////////////////////////////////////////////////////////////////
{
dim3 block(K, K, 1);
dim3 grid (m, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 4
hipLaunchKernelGGL(( ker4) , dim3(grid), dim3(block) , 0, 0, m, n, N, d_X, K, d_sample, d_B0);
hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
hipMemcpy(h_B0, d_B0, B0_size, hipMemcpyDeviceToHost);
// add to validation
// printVf(fpV, h_B0, m, K);
printf("GPU Optimized Kernel 4 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 4 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 5
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 5
// ker5 <<< grid, block >>> ();
// hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
// hipMemcpy(h_X, d_X, X_size, hipMemcpyDeviceToHost);
printf("GPU Optimized Kernel 5 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 5 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 6
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 6
// ker6 <<< grid, block >>> ();
// hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
// hipMemcpy(h_X, d_X, X_size, hipMemcpyDeviceToHost);
printf("GPU Optimized Kernel 6 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 6 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 7
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 7
// ker7 <<< grid, block >>> ();
// hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
// hipMemcpy(h_X, d_X, X_size, hipMemcpyDeviceToHost);
printf("GPU Optimized Kernel 7 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 7 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 8
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 8
// ker8 <<< grid, block >>> ();
// hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
// hipMemcpy(h_X, d_X, X_size, hipMemcpyDeviceToHost);
printf("GPU Optimized Kernel 8 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 8 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 9
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 9
// ker9 <<< grid, block >>> ();
// hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
// hipMemcpy(h_X, d_X, X_size, hipMemcpyDeviceToHost);
printf("GPU Optimized Kernel 9 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 9 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 10
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 10
// ker10 <<< grid, block >>> ();
// hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( hipPeekAtLastError() );
// copy result from device to host
// hipMemcpy(h_X, d_X, X_size, hipMemcpyDeviceToHost);
printf("GPU Optimized Kernel 10 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 10 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
//// VALIDATION
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
fclose(fpV);
// 7. clean up memory
  free(h_mappingindices);
  free(h_sample);
  free(h_X);
  free(h_XT);
  free(h_Xsqr);
  free(h_Xinv);
  free(h_B0);
  hipFree(d_X);
  hipFree(d_XT);
  hipFree(d_Xsqr);
  hipFree(d_Xinv);
  hipFree(d_B0);
  hipFree(d_mappingindices);
  hipFree(d_sample);
}
| a02d7ef49d7af648867e9757c1997e3372eddec7.cu | #include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "helper.cu.h"
#include "kernels-optim.cu.h"
#include "sequential.cu.h"
#define BLOCK_SIZE 1024//1024 //1024//2048
#define WIDTH_A 1024//1024 //1024//2048
#define HEIGHT_A 1//2048//2048//2048
#define WIDTH_B 1024//4096//2048
#define TILE_HEIGHT 1
#define TILE_WIDTH 1024
#define F32_MIN -FLT_MAX
#define I32_MIN -2147483648
typedef unsigned int uint;
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
//// Helpers
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) {
unsigned int resolution=1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) - (t1->tv_usec + resolution * t1->tv_sec);
result->tv_sec = diff / resolution;
result->tv_usec = diff % resolution;
return (diff<0);
}
void randomInit(float* data, int size) {
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
template<class T>
void matMult(T* A, T* B, T* C, int colsA, int rowsA, int colsB) {
for(int i = 0; i < rowsA; i++) {
for(int j = 0; j < colsB; j++) {
float sum = 0.0;
for(int k = 0; k < colsA; k++) {
sum += A[i*colsA + k] * B[k * colsB + j];
}
C[i * colsB + j] = sum;
}
}
}
template<class T>
bool validate(float* A,float* B, unsigned int sizeAB){
for(int i = 0; i < sizeAB; i++)
if (fabs(A[i] - B[i]) > 0.0005){
printf("INVALID RESULT %d %f %f\n", i, A[i], B[i]);
return false;
}
printf("VALID RESULT!\n");
return true;
}
int gpuAssert(cudaError_t code) {
if(code != cudaSuccess) {
printf("GPU Error: %s\n", cudaGetErrorString(code));
return -1;
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//// PROGRAM MAIN
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char const *argv[]) {
if (argc != 2) {
printf("Please include the name of the dataset.\n");
return -1;
}
///////////////////////////////////////////////////////////////////////////////
//// PARSING
///////////////////////////////////////////////////////////////////////////////
FILE *fp, *fpim;
if (argv[1][0] == 's') {
fp = fopen("../data/saharaC.in", "r");
fpim = fopen("../data/saharaCimages.in", "r");
} else {
fp = fopen("../data/peruC.in", "r");
fpim = fopen("../data/peruCimages.in", "r");
}
if (fp == NULL || fpim == NULL) {
printf("Files not read.\n");
return -1;
}
char input1[10], input2[10], input3[30], input4[30];
char input5[30], input6[30], input7[50], input8[30];
fscanf(fp, " %[^\n] %[^\n] %[^\n] %[^\n] ", input1,input2,input3,input4);
fscanf(fp, " %[^\n] %[^\n] %[^\n] %[^\n] ", input5,input6,input7,input8);
int k = atoi(input2);
uint n = (uint)atoi(input3);
uint N = (uint)atoi(input8);
uint mIRL = (uint)atoi(input7);
int trend = atoi(input1);
float freq = atof(input4);
float hfrac = atof(input5);
float lam = atof(input6);
uint m = 2;
int K = 2*k + 2;
  int mappingLen = 0, imageLen = 0, i = 0;
// getting the lengths of mappingindices and images
while (getc(fp) != EOF) { mappingLen++; }
while (getc(fpim) != EOF) { imageLen++; }
// rewinding the pointer to extract the data
rewind(fpim);
// extracting each array
char mappings[mappingLen], pixels[(imageLen-mappingLen)];
fscanf(fpim, " %[^\n] %[^\n] ", mappings, pixels);
// converting mappingindices from char* to int*
char delim[] = ",";
char *mapPtr = strtok(mappings, delim);
// allocating host memory for mappingindices and pixels
int* h_mappingindices = (int*) calloc(N,sizeof(int));
float* h_sample = (float*) calloc(N*m,sizeof(float));
// inserting data to mappingindices
while(mapPtr != NULL) {
h_mappingindices[i] = atoi(mapPtr);
i++;
mapPtr = strtok(NULL, delim);
}
// converting samples from char* to float*
char *pixelsPtr = strtok(pixels, delim);
i = 0;
// inserting data to sample
while(pixelsPtr != NULL) {
h_sample[i] = atof(pixelsPtr);
i++;
pixelsPtr = strtok(NULL, delim);
}
// closing file with data
fclose(fp);
// opening file for validation of results
FILE* fpV = fopen("../data/val.data","a+");
// allocate device memory
uint map_size = N*sizeof(int);
uint sam_size = N*m*sizeof(float);
int* d_mappingindices;
float* d_sample;
cudaMalloc((void**) &d_mappingindices, map_size);
cudaMalloc((void**) &d_sample, sam_size);
// copy host memory to device
cudaMemcpy(d_mappingindices, h_mappingindices, map_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_sample, h_sample, sam_size, cudaMemcpyHostToDevice);
uint X_size = K*N*sizeof(float);
uint Xsqr_size = K*K*m*sizeof(float);
uint Xinv_size = K*K*m*sizeof(float);
uint B0_size = K*m*sizeof(float);
// allocate host memory for X
float* h_X = (float*) calloc(N*K,sizeof(float));
float* h_XT = (float*) calloc(K*N,sizeof(float));
float* h_Xsqr = (float*) calloc(K*K*m,sizeof(float));
float* h_Xinv = (float*) calloc(K*K*m,sizeof(float));
float* h_B0 = (float*) calloc(K*m,sizeof(float));
// allocate device memory for X, XT and Xsqr
float *d_X, *d_XT, *d_Xsqr, *d_Xinv, *d_B0;
cudaMalloc((void**) &d_X, X_size);
cudaMalloc((void**) &d_XT, X_size);
cudaMalloc((void**) &d_Xsqr, Xsqr_size);
cudaMalloc((void**) &d_Xinv, Xinv_size);
cudaMalloc((void**) &d_B0, B0_size);
/////////////////////////////////////////////////////////////////////////
//// KERNEL 1
/////////////////////////////////////////////////////////////////////////
{
dim3 block(1024, 1, 1);
dim3 grid (1024, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 1
ker1 <<< grid, block >>>(N, K, freq, d_mappingindices, d_X, d_XT);
cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
cudaMemcpy(h_X, d_X, X_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_XT, d_XT, X_size, cudaMemcpyDeviceToHost);
// add to validation
printX(fpV, h_X, K, N);
printf("GPU Optimized Kernel 1 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
// double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 1 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 2
/////////////////////////////////////////////////////////////////////////
{
dim3 block(K, K, 1);
dim3 grid (m, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 2
ker2 <<< grid, block >>> (n, N, m, d_X, d_XT, d_sample, d_Xsqr, K);
cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
cudaMemcpy(h_Xsqr, d_Xsqr, Xsqr_size, cudaMemcpyDeviceToHost);
// validation
printM(fpV, h_Xsqr, m, K);
printf("GPU Optimized Kernel 2 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
// double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 2 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 3
/////////////////////////////////////////////////////////////////////////
{
dim3 block(2*K, K, 1);
dim3 grid (n, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 3
ker3<<< grid, block, 4*K*K*sizeof(float) >>>(m, K, d_Xsqr, d_Xinv);
// cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
cudaMemcpy(h_Xinv, d_Xinv, Xinv_size, cudaMemcpyDeviceToHost);
printM(fpV, h_Xinv, m, K);
// printM(fpV, h_Xsqr, m, K);
printf("GPU Optimized Kernel 3 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 3 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 4
/////////////////////////////////////////////////////////////////////////
{
dim3 block(K, K, 1);
dim3 grid (m, 1, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 4
ker4 <<< grid, block >>> (m, n, N, d_X, K, d_sample, d_B0);
cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
cudaMemcpy(h_B0, d_B0, B0_size, cudaMemcpyDeviceToHost);
// add to validation
// printVf(fpV, h_B0, m, K);
printf("GPU Optimized Kernel 4 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
// printf( "GPU Optimized Kernel 4 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 5
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 5
// ker5 <<< grid, block >>> ();
// cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
// cudaMemcpy(h_X, d_X, X_size, cudaMemcpyDeviceToHost);
printf("GPU Optimized Kernel 5 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 5 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 6
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 6
// ker6 <<< grid, block >>> ();
// cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
// cudaMemcpy(h_X, d_X, X_size, cudaMemcpyDeviceToHost);
printf("GPU Optimized Kernel 6 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 6 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 7
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 7
// ker7 <<< grid, block >>> ();
// cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
// cudaMemcpy(h_X, d_X, X_size, cudaMemcpyDeviceToHost);
printf("GPU Optimized Kernel 7 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 7 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 8
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 8
// ker8 <<< grid, block >>> ();
// cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
// cudaMemcpy(h_X, d_X, X_size, cudaMemcpyDeviceToHost);
printf("GPU Optimized Kernel 8 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 8 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 9
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 9
// ker9 <<< grid, block >>> ();
// cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
// cudaMemcpy(h_X, d_X, X_size, cudaMemcpyDeviceToHost);
printf("GPU Optimized Kernel 9 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 9 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
//// KERNEL 10
/////////////////////////////////////////////////////////////////////////
{
int dimx = ceil( ((float) WIDTH_B)/TILE_HEIGHT );
int dimy = ceil( ((float)HEIGHT_A)/TILE_WIDTH );
dim3 block(TILE_WIDTH, TILE_HEIGHT, 1);
dim3 grid (dimx, dimy, 1);
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// GPU call to kernel 10
// ker10 <<< grid, block >>> ();
// cudaDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
// check for cuda errors
gpuAssert( cudaPeekAtLastError() );
// copy result from device to host
// cudaMemcpy(h_X, d_X, X_size, cudaMemcpyDeviceToHost);
printf("GPU Optimized Kernel 10 runs in: %lu microsecs\n", elapsed);
float microsecPerMatrixMul = elapsed;
double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
printf( "GPU Optimized Kernel 10 Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
}
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
//// VALIDATION
/////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////
fclose(fpV);
// 7. clean up memory
free(h_mappingindices);
free(h_sample);
free(h_X);
free(h_XT);
free(h_Xsqr);
free(h_Xinv);
free(h_B0);
cudaFree(d_X);
cudaFree(d_XT);
cudaFree(d_Xsqr);
cudaFree(d_Xinv);
cudaFree(d_B0);
cudaFree(d_mappingindices);
cudaFree(d_sample);
}
|
0e2e50b1eabb78e717e44d2a684468432d38e628.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
// this is getting confusing
// this is the kernel that we're going to launch.
#include <photon_propagator/cuda/cascades_to_photons.cuh>
#include <photon_propagator/cpp/device.hpp>
#include <photon_propagator/cpp/random.hpp>
#include <photon_propagator/ice_model.hpp>
#include <photon_propagator/cascades.hpp>
#include <photon_propagator/photons.hpp>
#include <photon_propagator/cascades_to_photons.hpp>
void cascades::GeneratePhotons(const Random& rng,
const IceModel& ice_model,
const Cascades& cascades,
const std::shared_ptr<Device>& device,
const unsigned number_of_blocks,
const unsigned threads_per_block,
Photons& output){
// setup
// we can only launch in blocks of n_concurrent_threads.
// we have lots of photons.
// this is why it makes sense to generate, propagate, and pull hits.
const unsigned n_concurrent_threads{number_of_blocks*threads_per_block};
const unsigned n_photons{n_concurrent_threads};
Photons photons(n_concurrent_threads, device);
photons.to_device(); // i don't need to add photons. the kernel does that.
// in fact this is the whole point of this kernel
// to set the initial (pre-propagation) state of the
// photons.
const cascade& cscd = cascades.at(0);
hipLaunchKernelGGL(( cascades_to_photons), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, rng.__device_ptr,
ice_model.__device_ptr,
photons.__device_ptr,
n_photons,
cscd);
// Now pull the photons off the device
// and fill the 'output' vector.
// it might actually make more sense at some point to do as clsim does
// and have each kernel propagate N photons.
}
| 0e2e50b1eabb78e717e44d2a684468432d38e628.cu | #include <curand_kernel.h>
// this is getting confusing
// this is the kernel that we're going to launch.
#include <photon_propagator/cuda/cascades_to_photons.cuh>
#include <photon_propagator/cpp/device.hpp>
#include <photon_propagator/cpp/random.hpp>
#include <photon_propagator/ice_model.hpp>
#include <photon_propagator/cascades.hpp>
#include <photon_propagator/photons.hpp>
#include <photon_propagator/cascades_to_photons.hpp>
void cascades::GeneratePhotons(const Random& rng,
const IceModel& ice_model,
const Cascades& cascades,
const std::shared_ptr<Device>& device,
const unsigned number_of_blocks,
const unsigned threads_per_block,
Photons& output){
// setup
// we can only launch in blocks of n_concurrent_threads.
// we have lots of photons.
// this is why it makes sense to generate, propagate, and pull hits.
const unsigned n_concurrent_threads{number_of_blocks*threads_per_block};
const unsigned n_photons{n_concurrent_threads};
Photons photons(n_concurrent_threads, device);
photons.to_device(); // i don't need to add photons. the kernel does that.
// in fact this is the whole point of this kernel
// to set the initial (pre-propagation) state of the
// photons.
const cascade& cscd = cascades.at(0);
cascades_to_photons<<<number_of_blocks, threads_per_block>>>(rng.__device_ptr,
ice_model.__device_ptr,
photons.__device_ptr,
n_photons,
cscd);
// Now pull the photons off the device
// and fill the 'output' vector.
// it might actually make more sense at some point to do as clsim does
// and have each kernel propagate N photons.
}
|
9dd84a2798bdff5741993b0818b2e13f6d6e16d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <c10/util/Logging.h>
#include <c10/util/bit_cast.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPGraphsUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/native/hip/PersistentSoftmax.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/scalar_tensor.h>
#endif
#include <c10/hip/HIPMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/hip/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/hip/flash_attn/fmha_api.h>
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h>
#include <ATen/native/transformers/hip/mem_eff_attention/kernels/cutlassF.h>
#include <ATen/native/transformers/hip/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
namespace {
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
if (loop_offset < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1);
auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1);
return (sizes_dim1 * sizes_dim2).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
const Tensor& qkv,
const Tensor& qkv_bias,
const int64_t num_head) {
auto B = qkv.is_nested()
? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0)
: qkv.size(0);
// TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
// this too
auto T = qkv.is_nested()
? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
: qkv.size(1);
if (qkv.is_nested()) {
// Don't mess with non-nested case for now since it's not set up to fiddle
// with mask size.
// Round T up to next multiple of 8 so as to be able to utilize Tensor
// cores. Otherwise, sometimes with padding, *no* row will have the maximum
// sequence length and so we'll have a non-divisible-by-8 dimension even if
// the model author chose a multiple of 8.
T = T + (8 - (T % 8)) % 8;
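// e.g. T = 60 -> 60 + (8 - 60 % 8) % 8 = 64, while a T that is already a
// multiple of 8 picks up a padding term of 0 and is left unchanged.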
}
auto _3D = qkv_bias.size(0);
auto D = _3D / 3;
TORCH_CHECK(D % num_head == 0);
const auto dim_per_head = D / num_head;
auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel< \
scalar_t, \
accscalar_t, \
assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
nt_qkv_buffer \
.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
offsets_ptr, \
sizes_ptr, \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
qkv.scalar_type(),
"transform_bias_rescale_qkv",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
auto threads = ::max(
std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
auto blocks = B * T;
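// e.g. D = num_head * dim_per_head = 1024 with TRANSFORM_BIAS_RESCALE_VEC = 4
// gives threads = 256, and blocks = B * T launches one block per (batch, token) pair.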
const bool aligned =
((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0);
if (aligned) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
D % TRANSFORM_BIAS_RESCALE_VEC == 0,
"D = num_heads * dim_per_head, so we should have dim_per_head % "
"TRANSFORM_BIAS_RESCALE_VEC == 0 => "
"D % TRANSFORM_BIAS_RESCALE_VEC == 0");
}
if (qkv.is_nested()) {
auto* nt_qkv = get_nested_tensor_impl(qkv);
const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes());
auto offsets =
NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
.copy_(sizes.reshape({-1}));
auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
const auto offsets_ptr = metadata.data_ptr<int>();
const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
const auto input_dim = sizes.sizes()[1];
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
if (aligned &&
((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
CALL_ADD_PADDING_KERNEL(true);
} else {
CALL_ADD_PADDING_KERNEL(false);
}
} else if (aligned) {
CALL_KERNEL(true);
} else {
CALL_KERNEL(false);
}
C10_HIP_KERNEL_LAUNCH_CHECK();
});
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
auto q_k_v_s =
at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const int64_t embed_dim,
const int64_t num_head,
const Tensor& qkv_weight,
const Tensor& qkv_bias,
const Tensor& proj_weight,
const Tensor& proj_bias,
const c10::optional<Tensor>& mask,
bool need_weights,
bool average_attn_weights,
const c10::optional<int64_t> mask_type) {
// query shape: [B, T, D]
// qkv_weight shape: [3 * D, D]
TORCH_CHECK(
!mask || !query.is_nested(),
"NestedTensor with mask is not supported yet");
const auto D = embed_dim;
TORCH_CHECK(
query.dim() == 3,
"expected 3-D `query`, got ",
query.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || query.sizes()[2] == embed_dim,
"passed-in embed_dim ",
embed_dim,
" didn't match last dim of query ",
query.sizes()[2]);
TORCH_CHECK(
key.dim() == 3,
"expected 3-D `key`, got ",
key.dim(),
"-D tensor");
TORCH_CHECK(
value.dim() == 3,
"expected 3-D `value`, got ",
value.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || key.is_nested() || value.is_nested() ||
(query.sizes() == key.sizes() && key.sizes() == value.sizes()),
"expected `query`/`key`/`value` shapes to match");
TORCH_CHECK(
qkv_weight.dim() == 2,
"expected 2-D `qkv_weight`, got ",
qkv_weight.dim(),
"-D tensor");
TORCH_CHECK(
D * 3 == qkv_weight.sizes()[0],
"expected `qkv_weight` first dim to be 3x embed_dim");
TORCH_CHECK(
D == qkv_weight.sizes()[1],
"expected `qkv_weight` second dim to be embed_Dim");
TORCH_CHECK(
qkv_bias.dim() == 1,
"expected 1-D `qkv_bias`, got ",
qkv_bias.dim(),
"-D tensor");
TORCH_CHECK(
qkv_bias.sizes()[0] == 3 * D,
"expected `qkv_bias` first dim and first dim of query to be equal");
TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
const auto B = query.is_nested()
? get_nested_tensor_impl(query)->get_nested_sizes().size(0)
: query.sizes()[0];
auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
const auto dim_per_head = D / num_head;
if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) {
// We have not done linear projection yet but the input for SDP
// Is expected to be 4 dimensional. We "cheaply" create view tensors
// That will then be used for checking hot path conditions with select_sd_backend
auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false};
auto backend = select_sdp_backend(kernel_params);
// strides from packed projection for nested tensors when seq_len is 1 will
// trigger a contiguous call in the kernel, so we prevent this
bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true;
// The API for transformer_encoder is a mask of shape (Batch_Size, Seq_len_q)
// For mem-eff attention this will cause the expand call to error
// For now I am going to turn off that path to not have to deal with all the annoying
// Mask type shape grossness
if (!mask.has_value() && no_seq_len_1_nested &&
(backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) {
auto x = at::linear(query, qkv_weight, qkv_bias);
auto chunks = x.chunk(3, -1);
auto x_size_0 = x.size(0);
chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
auto y = at::scaled_dot_product_attention(
chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt);
auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim});
return std::make_tuple(
at::linear(past_sdp, proj_weight, proj_bias), Tensor());
}
// Returned math or error lets not use it
}
// shape: [B, T, 3 x D]
auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
if (!qkv.is_nested() && qkv.numel() == 0) {
if (query.is_nested()) {
return std::make_tuple(Tensor(), Tensor());
}
return std::make_tuple(at::empty_like(query), Tensor());
}
#ifndef NDEBUG
if (!query.is_nested() || !qkv.is_nested()) {
if (query.is_nested()) {
T = qkv.size(1);
}
debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
}
#endif
#ifdef DEBUG_PRINT_EACH_STEP
if (!qkv.is_nested()) {
std::cerr << "qkv: " << qkv << std::endl;
}
#endif
// shape: 3 x [B, num_head, T, dim_per_head]
auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
qkv = Tensor(); // Not used any more, allow free
auto& q = std::get<0>(q_k_v);
const auto& k = std::get<1>(q_k_v);
const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "q: " << q << std::endl;
std::cerr << "k: " << k << std::endl;
std::cerr << "v: " << v << std::endl;
#endif
// shape: [B, num_head, T, T]
auto qkt = bmm_nt(q, k);
// q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, T]
// TODO: long-term, have a kernel that works with
// NestedTensor directly if there is no mask passed
qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, dim_per_head]
// reuse storage for q; we're done with it
auto attn_ctx = bmm_nn(q, qkt, v);
// qkv is not dead; we just reused storage for q!
if (!need_weights) {
qkt = Tensor();
}
#ifndef NDEBUG
debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
// shape: [B, T, D]
// Fuse transform_0213 inside
auto proj = transform0213_gemm_nt_bias(
attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
if (need_weights && average_attn_weights) {
// weights are not needed for full transformer, so don't worry too
// much about performance -- we implement this just to make use
// cases that don't disable need_weights still get some speedup.
qkt = qkt.sum(1);
qkt /= num_head;
}
return std::make_tuple(std::move(proj), std::move(qkt));
}
std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention");
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t head_dim = query.size(3);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor cumulative_sequence_length_q = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_q,
max_seqlen_batch_q,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
Tensor cumulative_sequence_length_k = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_k,
max_seqlen_batch_k,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
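// e.g. batch_size = 2, max_seqlen_batch_q = 128 -> cumulative_sequence_length_q
// = [0, 128, 256]: entry b is where batch b's queries start in the packed
// (Nnz_q = batch_size * max_seqlen_batch_q) layout used below.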
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor attention, log_sumexp, debug_attn_mask, philox_seed, philox_offset;
std::tie(attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask) =
at::_flash_attention_forward(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal,
return_debug_mask,
scale);
// Reshape output to convert nnz to batch_size and seq_len
attention =
attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
return std::make_tuple(attention, log_sumexp, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask);
}
std::tuple<Tensor, Tensor, Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const c10::optional<at::Tensor>& attn_bias,
bool compute_log_sumexp,
double dropout_p,
bool is_causal,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
sdp::CustomMaskType custom_mask_type = is_causal
? sdp::CustomMaskType::CausalFromTopLeft
: sdp::CustomMaskType::NoCustomMask;
auto [attention, log_sumexp, seed, offset] = at::_efficient_attention_forward(
q_t,
k_t,
v_t,
attn_bias,
c10::nullopt,
c10::nullopt,
c10::nullopt,
dropout_p /*dropout_p*/,
static_cast<int64_t>(custom_mask_type),
compute_log_sumexp,
scale);
attention = attention.transpose(1, 2);
return std::make_tuple(std::move(attention), std::move(log_sumexp), std::move(seed), std::move(offset));
}
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::error) {
TORCH_CHECK(
false,
"No viable backend for scaled_dot_product_attention was found. ",
"This is likely due to turning off both the math kernel and the fused kernels.");
}
return static_cast<int64_t>(backend);
}
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _flash_attention_forward(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
/*
num_splits determines how much to parallelize over the seqlen_q dimension
num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for
benchmarking. We will hard code it to 0 for now
*/
constexpr int num_splits{0};
const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked();
at::Tensor output = at::empty_like(query);
auto [logsumexp, philox_seed, philox_offset, debug_attn_mask] = pytorch_fmha::mha_fwd(
query,
key,
value,
output,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
softmax_scale,
false, /*zero_tensors = false for all calls here*/
is_causal,
return_debug_mask, /*return_softmax (this is used for testing)*/
num_splits);
debug_attn_mask =
return_debug_mask ? debug_attn_mask : at::empty({0}, query.options());
return std::make_tuple(output, logsumexp, philox_seed, philox_offset, debug_attn_mask);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor(), Tensor(), Tensor(), Tensor(), Tensor());
}
std::tuple<at::Tensor, at::Tensor, Tensor, Tensor> _efficient_attention_forward(
const at::Tensor& query, // [b, seqlen, num_heads, K]
const at::Tensor& key, // [b, seqlen, num_heads, K]
const at::Tensor& value, // [b, seqlen, num_heads, Kv]
const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen]
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& seqstart_q,
// (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& seqstart_k,
// (Mode 1MHK only) Maximum sequence length across batches
const c10::optional<int64_t> max_seqlen_q_,
double dropout_p, // attention matrix dropout probability
int64_t custom_mask_type,
bool compute_logsumexp,
c10::optional<double> scale,
const c10::optional<at::Tensor>& causal_diagonal,
const c10::optional<at::Tensor>& seqlen_k) {
#if defined(USE_FLASH_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
TORCH_CHECK(query.dim() == 4);
TORCH_CHECK(key.dim() == 4);
TORCH_CHECK(value.dim() == 4);
// Batch sizes
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// Sequence length
TORCH_CHECK(key.size(1) == value.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
// TODO_DRISS we should return max_seqlen_k;
int64_t max_seqlen_q, max_seqlen_k;
TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value());
if (seqstart_q.has_value()) {
TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k));
TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q_.has_value());
max_seqlen_q = *max_seqlen_q_;
max_seqlen_k = 0; // Will be set inside the kernel
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t num_heads = query.size(-2);
int64_t K = query.size(-1);
int64_t Kv = value.size(-1);
at::Tensor res;
at::Tensor logsumexp;
at::Tensor seed_t, offset_t;
const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
// Note [Seed and Offset Device]
// If we are currently in graph capture mode, we need to create the seed and offset tensors on the device.
// This is necessary for CUDA graph-safe random number generation, which requires the seed and offset tensors
// to be single element tensors on device. During graph capture, when the seed and offset tensors are passed
// the pointers act as scratch space for storing the RNG state for the backwards pass.
// When calling backwards, we either construct a PhiloxState with the pointers or the actual values.
// For more information on CUDA graph-safe RNG states, see Note [CUDA Graph-safe RNG states].
at::PhiloxCudaState philox_state;
const bool in_capture_stream =
at::cuda::currentStreamCaptureStatus() != at::cuda::CaptureStatus::None;
auto device = in_capture_stream ? at::kCUDA : at::kCPU;
if (use_dropout) {
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// if using dropout, we produce 1 random number for each element of the
// attention tensor
philox_state = gen->philox_cuda_state(B * num_heads * M * N);
if (in_capture_stream) {
// The seed and offset will be populated by the kernel
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
} else {
auto [seed, offset] = at::cuda::philox::unpack(philox_state);
seed_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(seed)), at::dtype(at::kLong));
offset_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(offset)), at::dtype(at::kLong));
}
} else {
// Not using dropout
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
}
hipDeviceProp_t* p = at::cuda::getDeviceProperties(query.device().index());
const int computeCapability = p->major * 10 + p->minor;
bool kernel_launched = false;
const auto maxShmem = p->sharedMemPerBlockOptin;
auto launchKernel = [&](auto _k, auto kernel_fn) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
if (kernel_launched) {
return;
}
// Check if this kernel is compatible
if (!Kernel::kSupportsDropout && use_dropout) {
return;
}
if (!Kernel::kSupportsBias && bias.has_value()) {
return;
}
if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) {
return;
}
// Alignment
if ((query.stride(2) % Kernel::kAlignmentQ) ||
(key.stride(2) % Kernel::kAlignmentK) ||
(value.stride(2) % Kernel::kAlignmentV)) {
return;
}
// Uses too much shmem
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > maxShmem) {
return;
}
kernel_launched = true;
res = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<typename Kernel::output_t>::atScalarType()));
// NOTE: Should be aligned (by padding) in case M is
// not a good number for loading during backward
constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
logsumexp = at::empty(
{seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B,
num_heads,
compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
query.options().dtype(at::ScalarType::Float));
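// (e.g. with kAlignLSE = 32 -- an illustrative value only -- max_seqlen_q = 100
//  would allocate an LSE row of ceil_div(100, 32) * 32 = 128 entries)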
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = compute_logsumexp
? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
: nullptr;
at::Tensor output_accum;
if (Kernel::kNeedsOutputAccumulatorBuffer) {
output_accum = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<
typename Kernel::output_accum_t>::atScalarType()));
p.output_accum_ptr =
(typename Kernel::output_accum_t*)output_accum.data_ptr();
} else {
p.output_accum_ptr = nullptr;
}
p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
if (seqstart_q.has_value()) {
p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr();
p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr();
}
p.num_heads = num_heads;
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B;
p.custom_mask_type = custom_mask_type;
p.causal_diagonal_ptr = nullptr;
if (causal_diagonal.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value());
TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int);
p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr();
}
p.seqlen_k_ptr = nullptr;
if (seqlen_k.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value());
TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int);
p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr();
}
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1));
if (bias.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
TORCH_CHECK(
bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
"invalid dtype for bias - should match query's dtype");
p.attn_bias_ptr = (scalar_t*)bias->data_ptr();
// assign strides for bias, viewed as
// (batch_sz, n_heads, n_queries, n_keys)
// We make sure to expand prior to calling the kernel
const at::Tensor& bias_4d_view = *bias;
TORCH_CHECK(bias_4d_view.dim()==4);
TORCH_CHECK(bias_4d_view.size(0)==B);
TORCH_CHECK(bias_4d_view.size(1)==num_heads);
TORCH_CHECK(bias_4d_view.size(2)==M);
TORCH_CHECK(bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
}
p.use_dropout = use_dropout;
if (p.use_dropout) {
p.rng_engine_inputs = philox_state;
p.dropout_prob = dropout_p;
p.seed = seed_t.data_ptr<int64_t>();
p.extragraph_offset = offset_t.data_ptr<int64_t>();
}
if (smem_bytes > 0xc000) {
auto err = hipFuncSetAttribute(
kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
TORCH_CHECK(
err != hipErrorInvalidValue,
"This GPU does not have enough shared-memory (kernel requires ",
smem_bytes / 1024,
" kb)");
AT_CUDA_CHECK(err);
}
Kernel::check_supported(p);
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p);
};
// Dispatch to the right kernel
DISPATCH_TYPES(query, ([&]() {
dispatch_cutlassF<scalar_t>(launchKernel, computeCapability);
}));
TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!");
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(
std::move(res),
std::move(logsumexp),
std::move(seed_t),
std::move(offset_t));
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
TORCH_CHECK(false, "This operator should be overridden in python before use");
return at::Tensor();
}
REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda);
#ifdef USE_FLASH_ATTENTION
namespace {
/**
* simple kernel that populates a tensor with rand uniform values.
* currently only used for testing purposes, not much attention
* is paid to performance.
*
* problem is partitioned as follows:
* - (batch, head) is given by block coordinates
* - each thread handles a row for a given (batch, head)
*/
template <typename mask_t>
__global__ void rand_uniform_kernel(
int64_t n_heads,
int64_t n_queries,
int64_t n_keys,
float dropout_prob,
at::PhiloxCudaState rng_engine_inputs,
mask_t* mask_out,
int64_t mask_numel) {
const int64_t batch_id = blockIdx.x;
const int64_t head_id = blockIdx.y;
const int64_t query_idx = threadIdx.x;
const auto seeds = at::cuda::philox::unpack(rng_engine_inputs);
const int dropout_seq_start = batch_id * (n_heads * n_queries * n_keys) +
head_id * (n_queries * n_keys);
const int64_t query_start_idx = query_idx * n_keys;
hiprandStatePhilox4_32_10_t curand_state;
hiprand_init(
std::get<0>(seeds),
0,
std::get<1>(seeds) + dropout_seq_start + query_start_idx,
&curand_state);
for (int key_start_idx = 0; key_start_idx < n_keys; key_start_idx += 4) {
float4 rand_quad = hiprand_uniform4(&curand_state);
#pragma unroll
for (int i = 0; i < 4; ++i) {
const int64_t linear_idx = dropout_seq_start + query_start_idx + key_start_idx + i;
if (linear_idx < mask_numel) {
mask_out[linear_idx] = (&rand_quad.x)[i];
}
}
}
}
} // namespace
#endif
/**
* fill tensor with random uniform values. only used for testing, not much
* attention is paid to performance
*/
at::Tensor& _fill_mem_eff_dropout_mask_(
Tensor& self,
double dropout_p,
const int64_t seed,
const int64_t offset) {
TORCH_CHECK(self.is_contiguous());
TORCH_CHECK(self.dtype() == at::ScalarType::Float);
const int64_t batch_sz = self.size(0);
const int64_t n_heads = self.size(1);
const int64_t n_queries = self.size(2);
const int64_t n_keys = self.size(3);
#if defined(USE_FLASH_ATTENTION)
at::PhiloxCudaState rng_engine_inputs;
rng_engine_inputs = at::PhiloxCudaState(seed, offset);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(self.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( rand_uniform_kernel<float>), dim3(dim3(batch_sz, n_heads)), dim3(n_queries), 0, stream,
n_heads,
n_queries,
n_keys,
dropout_p,
rng_engine_inputs,
reinterpret_cast<float*>(self.data_ptr()),
self.numel());
return self;
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return self;
}
} // namespace native
} // namespace at
| 9dd84a2798bdff5741993b0818b2e13f6d6e16d9.cu | #include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <c10/util/Logging.h>
#include <c10/util/bit_cast.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/scalar_tensor.h>
#endif
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/cuda/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/cuda/flash_attn/fmha_api.h>
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/kernels/cutlassF.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
namespace {
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
if (loop_offset < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
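// Given a nested qkv whose per-sample sizes are [T_i, 3 * D], fold the two size
// columns into a single per-sample element count T_i * (3 * D), which is what the
// batch-offset computation in transform_bias_rescale_qkv_cuda expects.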
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1);
auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1);
return (sizes_dim1 * sizes_dim2).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
const Tensor& qkv,
const Tensor& qkv_bias,
const int64_t num_head) {
auto B = qkv.is_nested()
? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0)
: qkv.size(0);
// TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
// this too
auto T = qkv.is_nested()
? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
: qkv.size(1);
if (qkv.is_nested()) {
// Don't mess with non-nested case for now since it's not set up to fiddle
// with mask size.
// Round T up to next multiple of 8 so as to be able to utilize Tensor
// cores. Otherwise, sometimes with padding, *no* row will have the maximum
// sequence length and so we'll have a non-divisible-by-8 dimension even if
// the model author chose a multiple of 8.
T = T + (8 - (T % 8)) % 8;
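    // e.g. T = 13 -> 16, while T = 40 is already a multiple of 8 and stays 40.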
}
auto _3D = qkv_bias.size(0);
auto D = _3D / 3;
TORCH_CHECK(D % num_head == 0);
const auto dim_per_head = D / num_head;
auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned) \
transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \
qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned) \
transform_bias_rescale_qkv_add_padding_kernel< \
scalar_t, \
accscalar_t, \
assume_aligned> \
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \
nt_qkv_buffer \
.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
offsets_ptr, \
sizes_ptr, \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
qkv.scalar_type(),
"transform_bias_rescale_qkv",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
auto threads = std::max(
std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
auto blocks = B * T;
const bool aligned =
((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0);
if (aligned) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
D % TRANSFORM_BIAS_RESCALE_VEC == 0,
"D = num_heads * dim_per_head, so we should have dim_per_head % "
"TRANSFORM_BIAS_RESCALE_VEC == 0 => "
"D % TRANSFORM_BIAS_RESCALE_VEC == 0");
}
if (qkv.is_nested()) {
auto* nt_qkv = get_nested_tensor_impl(qkv);
const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes());
auto offsets =
NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
.copy_(sizes.reshape({-1}));
auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
const auto offsets_ptr = metadata.data_ptr<int>();
const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
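          // metadata packs [batch offsets (numel + 1 ints) | per-sample sizes (numel ints)]
          // into a single device int tensor, so sizes_ptr simply points past the offsets.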
const auto input_dim = sizes.sizes()[1];
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
if (aligned &&
((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
CALL_ADD_PADDING_KERNEL(true);
} else {
CALL_ADD_PADDING_KERNEL(false);
}
} else if (aligned) {
CALL_KERNEL(true);
} else {
CALL_KERNEL(false);
}
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
auto q_k_v_s =
at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const int64_t embed_dim,
const int64_t num_head,
const Tensor& qkv_weight,
const Tensor& qkv_bias,
const Tensor& proj_weight,
const Tensor& proj_bias,
const c10::optional<Tensor>& mask,
bool need_weights,
bool average_attn_weights,
const c10::optional<int64_t> mask_type) {
// query shape: [B, T, D]
// qkv_weight shape: [3 * D, D]
TORCH_CHECK(
!mask || !query.is_nested(),
"NestedTensor with mask is not supported yet");
const auto D = embed_dim;
TORCH_CHECK(
query.dim() == 3,
"expected 3-D `query`, got ",
query.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || query.sizes()[2] == embed_dim,
"passed-in embed_dim ",
embed_dim,
" didn't match last dim of query ",
query.sizes()[2]);
TORCH_CHECK(
key.dim() == 3,
"expected 3-D `key`, got ",
key.dim(),
"-D tensor");
TORCH_CHECK(
value.dim() == 3,
"expected 3-D `value`, got ",
value.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || key.is_nested() || value.is_nested() ||
(query.sizes() == key.sizes() && key.sizes() == value.sizes()),
"expected `query`/`key`/`value` shapes to match");
TORCH_CHECK(
qkv_weight.dim() == 2,
"expected 2-D `qkv_weight`, got ",
qkv_weight.dim(),
"-D tensor");
TORCH_CHECK(
D * 3 == qkv_weight.sizes()[0],
"expected `qkv_weight` first dim to be 3x embed_dim");
TORCH_CHECK(
      D == qkv_weight.sizes()[1],
      "expected `qkv_weight` second dim to be embed_dim");
TORCH_CHECK(
qkv_bias.dim() == 1,
"expected 1-D `qkv_bias`, got ",
qkv_bias.dim(),
"-D tensor");
TORCH_CHECK(
qkv_bias.sizes()[0] == 3 * D,
"expected `qkv_bias` first dim and first dim of query to be equal");
TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
const auto B = query.is_nested()
? get_nested_tensor_impl(query)->get_nested_sizes().size(0)
: query.sizes()[0];
auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
const auto dim_per_head = D / num_head;
if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) {
    // We have not done the linear projection yet, but the input for SDP
    // is expected to be 4-dimensional. We "cheaply" create view tensors
    // that will then be used for checking hot path conditions with select_sdp_backend
auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false};
auto backend = select_sdp_backend(kernel_params);
    // Strides from the packed projection for nested tensors with seq_len == 1
    // will trigger a contiguous call in the kernel, so we prevent this
bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true;
    // The API for transformer_encoder is a mask of shape (Batch_Size, Seq_len_q).
    // For mem-eff attention this will cause the expand call to error.
    // For now, turn off that path so we don't have to deal with all the annoying
    // mask type/shape grossness
if (!mask.has_value() && no_seq_len_1_nested &&
(backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) {
auto x = at::linear(query, qkv_weight, qkv_bias);
auto chunks = x.chunk(3, -1);
auto x_size_0 = x.size(0);
chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
auto y = at::scaled_dot_product_attention(
chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt);
auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim});
return std::make_tuple(
at::linear(past_sdp, proj_weight, proj_bias), Tensor());
}
    // Backend selection returned math or error; don't take the fused fast path
}
// shape: [B, T, 3 x D]
auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
if (!qkv.is_nested() && qkv.numel() == 0) {
if (query.is_nested()) {
return std::make_tuple(Tensor(), Tensor());
}
return std::make_tuple(at::empty_like(query), Tensor());
}
#ifndef NDEBUG
if (!query.is_nested() || !qkv.is_nested()) {
if (query.is_nested()) {
T = qkv.size(1);
}
debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
}
#endif
#ifdef DEBUG_PRINT_EACH_STEP
if (!qkv.is_nested()) {
std::cerr << "qkv: " << qkv << std::endl;
}
#endif
// shape: 3 x [B, num_head, T, dim_per_head]
auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
qkv = Tensor(); // Not used any more, allow free
auto& q = std::get<0>(q_k_v);
const auto& k = std::get<1>(q_k_v);
const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "q: " << q << std::endl;
std::cerr << "k: " << k << std::endl;
std::cerr << "v: " << v << std::endl;
#endif
// shape: [B, num_head, T, T]
auto qkt = bmm_nt(q, k);
// q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, T]
// TODO: long-term, have a kernel that works with
// NestedTensor directly if there is no mask passed
qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, dim_per_head]
// reuse storage for q; we're done with it
auto attn_ctx = bmm_nn(q, qkt, v);
// qkv is not dead; we just reused storage for q!
if (!need_weights) {
qkt = Tensor();
}
#ifndef NDEBUG
debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
// shape: [B, T, D]
// Fuse transform_0213 inside
auto proj = transform0213_gemm_nt_bias(
attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
if (need_weights && average_attn_weights) {
    // weights are not needed for full transformer, so don't worry too
    // much about performance -- we implement this just so that use
    // cases that don't disable need_weights still get some speedup.
qkt = qkt.sum(1);
qkt /= num_head;
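    // Net effect: [B, num_head, T, T] -> [B, T, T], i.e. attention weights averaged over heads.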
}
return std::make_tuple(std::move(proj), std::move(qkt));
}
std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention");
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t head_dim = query.size(3);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor cumulative_sequence_length_q = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_q,
max_seqlen_batch_q,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
Tensor cumulative_sequence_length_k = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_k,
max_seqlen_batch_k,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
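  // Sequence-start offsets for the flattened (varlen) layout, e.g. with
  // batch_size = 2 and max_seqlen_batch_q = 5 this yields [0, 5, 10].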
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor attention, log_sumexp, debug_attn_mask, philox_seed, philox_offset;
std::tie(attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask) =
at::_flash_attention_forward(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal,
return_debug_mask,
scale);
// Reshape output to convert nnz to batch_size and seq_len
attention =
attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
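  // [Nnz_q, num_heads, head_dim] -> [B, max_seqlen_batch_q, num_heads, head_dim]
  // -> [B, num_heads, max_seqlen_batch_q, head_dim]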
return std::make_tuple(attention, log_sumexp, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask);
}
std::tuple<Tensor, Tensor, Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const c10::optional<at::Tensor>& attn_bias,
bool compute_log_sumexp,
double dropout_p,
bool is_causal,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
sdp::CustomMaskType custom_mask_type = is_causal
? sdp::CustomMaskType::CausalFromTopLeft
: sdp::CustomMaskType::NoCustomMask;
auto [attention, log_sumexp, seed, offset] = at::_efficient_attention_forward(
q_t,
k_t,
v_t,
attn_bias,
c10::nullopt,
c10::nullopt,
c10::nullopt,
dropout_p /*dropout_p*/,
static_cast<int64_t>(custom_mask_type),
compute_log_sumexp,
scale);
attention = attention.transpose(1, 2);
return std::make_tuple(std::move(attention), std::move(log_sumexp), std::move(seed), std::move(offset));
}
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::error) {
TORCH_CHECK(
false,
"No viable backend for scaled_dot_product_attention was found. ",
"This is likely due to turning off both the math kernel and the fused kernels.");
}
return static_cast<int64_t>(backend);
}
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _flash_attention_forward(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
/*
num_splits determines how much to parallelize over the seqlen_q dimension
num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for
benchmarking. We will hard code it to 0 for now
*/
constexpr int num_splits{0};
const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked();
at::Tensor output = at::empty_like(query);
auto [logsumexp, philox_seed, philox_offset, debug_attn_mask] = pytorch_fmha::mha_fwd(
query,
key,
value,
output,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
softmax_scale,
false, /*zero_tensors = false for all calls here*/
is_causal,
return_debug_mask, /*return_softmax (this is used for testing)*/
num_splits);
debug_attn_mask =
return_debug_mask ? debug_attn_mask : at::empty({0}, query.options());
return std::make_tuple(output, logsumexp, philox_seed, philox_offset, debug_attn_mask);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor(), Tensor(), Tensor(), Tensor(), Tensor());
}
std::tuple<at::Tensor, at::Tensor, Tensor, Tensor> _efficient_attention_forward(
const at::Tensor& query, // [b, seqlen, num_heads, K]
const at::Tensor& key, // [b, seqlen, num_heads, K]
const at::Tensor& value, // [b, seqlen, num_heads, Kv]
const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen]
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& seqstart_q,
// (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& seqstart_k,
// (Mode 1MHK only) Maximum sequence length across batches
const c10::optional<int64_t> max_seqlen_q_,
double dropout_p, // attention matrix dropout probability
int64_t custom_mask_type,
bool compute_logsumexp,
c10::optional<double> scale,
const c10::optional<at::Tensor>& causal_diagonal,
const c10::optional<at::Tensor>& seqlen_k) {
#if defined(USE_FLASH_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
TORCH_CHECK(query.dim() == 4);
TORCH_CHECK(key.dim() == 4);
TORCH_CHECK(value.dim() == 4);
// Batch sizes
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// Sequence length
TORCH_CHECK(key.size(1) == value.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
// TODO_DRISS we should return max_seqlen_k;
int64_t max_seqlen_q, max_seqlen_k;
TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value());
if (seqstart_q.has_value()) {
TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k));
TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q_.has_value());
max_seqlen_q = *max_seqlen_q_;
max_seqlen_k = 0; // Will be set inside the kernel
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
at::cuda::CUDAGuard device_guard(query.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t num_heads = query.size(-2);
int64_t K = query.size(-1);
int64_t Kv = value.size(-1);
at::Tensor res;
at::Tensor logsumexp;
at::Tensor seed_t, offset_t;
const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
// Note [Seed and Offset Device]
// If we are currently in graph capture mode, we need to create the seed and offset tensors on the device.
// This is necessary for CUDA graph-safe random number generation, which requires the seed and offset tensors
// to be single element tensors on device. During graph capture, when the seed and offset tensors are passed
// the pointers act as scratch space for storing the RNG state for the backwards pass.
// When calling backwards, we either construct a PhiloxState with the pointers or the actual values.
// For more information on CUDA graph-safe RNG states, see Note [CUDA Graph-safe RNG states].
at::PhiloxCudaState philox_state;
const bool in_capture_stream =
at::cuda::currentStreamCaptureStatus() != at::cuda::CaptureStatus::None;
auto device = in_capture_stream ? at::kCUDA : at::kCPU;
if (use_dropout) {
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// if using dropout, we produce 1 random number for each element of the
// attention tensor
philox_state = gen->philox_cuda_state(B * num_heads * M * N);
if (in_capture_stream) {
// The seed and offset will be populated by the kernel
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
} else {
auto [seed, offset] = at::cuda::philox::unpack(philox_state);
seed_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(seed)), at::dtype(at::kLong));
offset_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(offset)), at::dtype(at::kLong));
}
} else {
// Not using dropout
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
}
cudaDeviceProp* p = at::cuda::getDeviceProperties(query.device().index());
const int computeCapability = p->major * 10 + p->minor;
bool kernel_launched = false;
const auto maxShmem = p->sharedMemPerBlockOptin;
auto launchKernel = [&](auto _k, auto kernel_fn) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
if (kernel_launched) {
return;
}
// Check if this kernel is compatible
if (!Kernel::kSupportsDropout && use_dropout) {
return;
}
if (!Kernel::kSupportsBias && bias.has_value()) {
return;
}
if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) {
return;
}
// Alignment
if ((query.stride(2) % Kernel::kAlignmentQ) ||
(key.stride(2) % Kernel::kAlignmentK) ||
(value.stride(2) % Kernel::kAlignmentV)) {
return;
}
// Uses too much shmem
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > maxShmem) {
return;
}
kernel_launched = true;
res = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<typename Kernel::output_t>::atScalarType()));
// NOTE: Should be aligned (by padding) in case M is
// not a good number for loading during backward
constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
logsumexp = at::empty(
{seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B,
num_heads,
compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
query.options().dtype(at::ScalarType::Float));
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = compute_logsumexp
? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
: nullptr;
at::Tensor output_accum;
if (Kernel::kNeedsOutputAccumulatorBuffer) {
output_accum = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<
typename Kernel::output_accum_t>::atScalarType()));
p.output_accum_ptr =
(typename Kernel::output_accum_t*)output_accum.data_ptr();
} else {
p.output_accum_ptr = nullptr;
}
p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
if (seqstart_q.has_value()) {
p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr();
p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr();
}
p.num_heads = num_heads;
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B;
p.custom_mask_type = custom_mask_type;
p.causal_diagonal_ptr = nullptr;
if (causal_diagonal.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value());
TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int);
p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr();
}
p.seqlen_k_ptr = nullptr;
if (seqlen_k.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value());
TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int);
p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr();
}
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1));
if (bias.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
TORCH_CHECK(
bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
"invalid dtype for bias - should match query's dtype");
p.attn_bias_ptr = (scalar_t*)bias->data_ptr();
// assign strides for bias, viewed as
// (batch_sz, n_heads, n_queries, n_keys)
// We make sure to expand prior to calling the kernel
const at::Tensor& bias_4d_view = *bias;
TORCH_CHECK(bias_4d_view.dim()==4);
TORCH_CHECK(bias_4d_view.size(0)==B);
TORCH_CHECK(bias_4d_view.size(1)==num_heads);
TORCH_CHECK(bias_4d_view.size(2)==M);
TORCH_CHECK(bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
}
p.use_dropout = use_dropout;
if (p.use_dropout) {
p.rng_engine_inputs = philox_state;
p.dropout_prob = dropout_p;
p.seed = seed_t.data_ptr<int64_t>();
p.extragraph_offset = offset_t.data_ptr<int64_t>();
}
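    // 0xc000 = 48 KB, the default dynamic shared memory limit per block; larger
    // requests must opt in via cudaFuncSetAttribute, and devices that cannot
    // satisfy the request report cudaErrorInvalidValue.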
if (smem_bytes > 0xc000) {
auto err = cudaFuncSetAttribute(
kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
TORCH_CHECK(
err != cudaErrorInvalidValue,
"This GPU does not have enough shared-memory (kernel requires ",
smem_bytes / 1024,
" kb)");
AT_CUDA_CHECK(err);
}
Kernel::check_supported(p);
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
};
// Dispatch to the right kernel
DISPATCH_TYPES(query, ([&]() {
dispatch_cutlassF<scalar_t>(launchKernel, computeCapability);
}));
TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!");
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(
std::move(res),
std::move(logsumexp),
std::move(seed_t),
std::move(offset_t));
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
TORCH_CHECK(false, "This operator should be overridden in python before use");
return at::Tensor();
}
REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda);
#ifdef USE_FLASH_ATTENTION
namespace {
/**
* simple kernel that populates a tensor with rand uniform values.
* currently only used for testing purposes, not much attention
* is paid to performance.
*
* problem is partitioned as follows:
* - (batch, head) is given by block coordinates
* - each thread handles a row for a given (batch, head)
*/
template <typename mask_t>
__global__ void rand_uniform_kernel(
int64_t n_heads,
int64_t n_queries,
int64_t n_keys,
float dropout_prob,
at::PhiloxCudaState rng_engine_inputs,
mask_t* mask_out,
int64_t mask_numel) {
const int64_t batch_id = blockIdx.x;
const int64_t head_id = blockIdx.y;
const int64_t query_idx = threadIdx.x;
const auto seeds = at::cuda::philox::unpack(rng_engine_inputs);
const int dropout_seq_start = batch_id * (n_heads * n_queries * n_keys) +
head_id * (n_queries * n_keys);
const int64_t query_start_idx = query_idx * n_keys;
curandStatePhilox4_32_10_t curand_state;
curand_init(
std::get<0>(seeds),
0,
std::get<1>(seeds) + dropout_seq_start + query_start_idx,
&curand_state);
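  // curand_uniform4 produces four floats per call, so walk the key dimension in steps of 4.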
for (int key_start_idx = 0; key_start_idx < n_keys; key_start_idx += 4) {
float4 rand_quad = curand_uniform4(&curand_state);
#pragma unroll
for (int i = 0; i < 4; ++i) {
const int64_t linear_idx = dropout_seq_start + query_start_idx + key_start_idx + i;
if (linear_idx < mask_numel) {
mask_out[linear_idx] = (&rand_quad.x)[i];
}
}
}
}
} // namespace
#endif
/**
* fill tensor with random uniform values. only used for testing, not much
* attention is paid to performance
*/
at::Tensor& _fill_mem_eff_dropout_mask_(
Tensor& self,
double dropout_p,
const int64_t seed,
const int64_t offset) {
TORCH_CHECK(self.is_contiguous());
TORCH_CHECK(self.dtype() == at::ScalarType::Float);
const int64_t batch_sz = self.size(0);
const int64_t n_heads = self.size(1);
const int64_t n_queries = self.size(2);
const int64_t n_keys = self.size(3);
#if defined(USE_FLASH_ATTENTION)
at::PhiloxCudaState rng_engine_inputs;
rng_engine_inputs = at::PhiloxCudaState(seed, offset);
at::cuda::CUDAGuard device_guard(self.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
rand_uniform_kernel<float><<<dim3(batch_sz, n_heads), n_queries, 0, stream>>>(
n_heads,
n_queries,
n_keys,
dropout_p,
rng_engine_inputs,
reinterpret_cast<float*>(self.data_ptr()),
self.numel());
return self;
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return self;
}
} // namespace native
} // namespace at
|
117c65471e37a44c3596057825da88bca8d1d1ac.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=1024 --gridDim=1 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 2 //1024
__global__ void definitions (unsigned int* B)
{
atomicDec(B,7);//0111 -> 1000 -> 0000 -> 0001 -> 0010 -> 0011 -> 0100 -> 0101 -> 0110 ...
/*the second argument on atomicDec() is a limit for decs. When this limit is reached, B receives <LIM>*/
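/* With b initialized to 5 and N = 2 threads each decrementing once, the value
   goes 5 -> 4 -> 3 and the wrap-to-7 limit is never reached, which is why
   main() asserts b == 3. */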
}
int main (){
unsigned int b = 5;
unsigned int *dev_b;
hipMalloc ((void**) &dev_b, sizeof(unsigned int));
hipMemcpy(dev_b, &b, sizeof(unsigned int),hipMemcpyHostToDevice);
//definitions <<<1,N>>>(dev_b);
ESBMC_verify_kernel_ui(definitions,1,N,dev_b);
hipMemcpy(&b,dev_b,sizeof(unsigned int),hipMemcpyDeviceToHost);
printf("B: %u\n", b);
assert(b==3);
hipFree(dev_b);
return 0;
}
| 117c65471e37a44c3596057825da88bca8d1d1ac.cu | //pass
//--blockDim=1024 --gridDim=1 --no-inline
#include <cuda.h>
#include <stdio.h>
#define N 2 //1024
__global__ void definitions (unsigned int* B)
{
atomicDec(B,7);//0111 -> 1000 -> 0000 -> 0001 -> 0010 -> 0011 -> 0100 -> 0101 -> 0110 ...
/*the second argument on atomicDec() is a limit for decs. When this limit is reached, B receives <LIM>*/
}
int main (){
unsigned int b = 5;
unsigned int *dev_b;
cudaMalloc ((void**) &dev_b, sizeof(unsigned int));
cudaMemcpy(dev_b, &b, sizeof(unsigned int),cudaMemcpyHostToDevice);
//definitions <<<1,N>>>(dev_b);
ESBMC_verify_kernel_ui(definitions,1,N,dev_b);
cudaMemcpy(&b,dev_b,sizeof(unsigned int),cudaMemcpyDeviceToHost);
printf("B: %u\n", b);
assert(b==3);
cudaFree(dev_b);
return 0;
}
|
4652f4e6f26406cefb3ce123dd4c9360695455ed.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <prims/property_generator.cuh>
#include <prims/fill_edge_src_dst_property.cuh>
#include <prims/per_v_transform_reduce_incoming_outgoing_e.cuh>
#include <prims/reduce_op.cuh>
#include <prims/update_edge_src_dst_property.cuh>
#include <cugraph/edge_property.hpp>
#include <cugraph/edge_src_dst_property.hpp>
#include <cugraph/graph_functions.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/host_scalar_comm.hpp>
#include <utilities/base_fixture.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/edge_partition_view.hpp>
#include <cugraph/edge_src_dst_property.hpp>
#include <cugraph/utilities/dataframe_buffer.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <raft/random/rng_state.hpp>
#include <chrono>
#include <iostream>
#include <random>
#include <gtest/gtest.h>
struct MaximalIndependentSet_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MGMaximalIndependentSet
: public ::testing::TestWithParam<std::tuple<MaximalIndependentSet_Usecase, input_usecase_t>> {
public:
Tests_MGMaximalIndependentSet() {}
static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); }
static void TearDownTestCase() { handle_.reset(); }
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void run_current_test(std::tuple<MaximalIndependentSet_Usecase, input_usecase_t> const& param)
{
auto [mis_usecase, input_usecase] = param;
auto const comm_rank = handle_->get_comms().get_rank();
auto const comm_size = handle_->get_comms().get_size();
HighResTimer hr_timer{};
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize());
handle_->get_comms().barrier();
hr_timer.start("MG Construct graph");
}
constexpr bool multi_gpu = true;
auto [mg_graph, mg_edge_weights, mg_renumber_map] =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, false, multi_gpu>(
*handle_, input_usecase, false, true);
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize());
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
auto mg_graph_view = mg_graph.view();
auto mg_edge_weight_view =
mg_edge_weights ? std::make_optional((*mg_edge_weights).view()) : std::nullopt;
raft::random::RngState rng_state(multi_gpu ? handle_->get_comms().get_rank() : 0);
auto d_mis = cugraph::maximal_independent_set<vertex_t, edge_t, multi_gpu>(
*handle_, mg_graph_view, rng_state);
// Test MIS
if (mis_usecase.check_correctness) {
RAFT_CUDA_TRY(hipDeviceSynchronize());
std::vector<vertex_t> h_mis(d_mis.size());
raft::update_host(h_mis.data(), d_mis.data(), d_mis.size(), handle_->get_stream());
RAFT_CUDA_TRY(hipDeviceSynchronize());
auto vertex_first = mg_graph_view.local_vertex_partition_range_first();
auto vertex_last = mg_graph_view.local_vertex_partition_range_last();
std::for_each(h_mis.begin(), h_mis.end(), [vertex_first, vertex_last](vertex_t v) {
ASSERT_TRUE((v >= vertex_first) && (v < vertex_last));
});
      // If a vertex is included in MIS, then none of its neighbors should be
vertex_t local_vtx_partitoin_size = mg_graph_view.local_vertex_partition_range_size();
rmm::device_uvector<vertex_t> d_total_outgoing_nbrs_included_mis(local_vtx_partitoin_size,
handle_->get_stream());
rmm::device_uvector<vertex_t> inclusiong_flags(local_vtx_partitoin_size,
handle_->get_stream());
thrust::uninitialized_fill(handle_->get_thrust_policy(),
inclusiong_flags.begin(),
inclusiong_flags.end(),
vertex_t{0});
thrust::for_each(
handle_->get_thrust_policy(),
d_mis.begin(),
d_mis.end(),
[inclusiong_flags =
raft::device_span<vertex_t>(inclusiong_flags.data(), inclusiong_flags.size()),
v_first = mg_graph_view.local_vertex_partition_range_first()] __device__(auto v) {
auto v_offset = v - v_first;
inclusiong_flags[v_offset] = vertex_t{1};
});
RAFT_CUDA_TRY(hipDeviceSynchronize());
// Cache for inclusiong_flags
using GraphViewType = cugraph::graph_view_t<vertex_t, edge_t, false, true>;
cugraph::edge_src_property_t<GraphViewType, vertex_t> src_inclusion_cache(*handle_);
cugraph::edge_dst_property_t<GraphViewType, vertex_t> dst_inclusion_cache(*handle_);
if constexpr (multi_gpu) {
src_inclusion_cache =
cugraph::edge_src_property_t<GraphViewType, vertex_t>(*handle_, mg_graph_view);
dst_inclusion_cache =
cugraph::edge_dst_property_t<GraphViewType, vertex_t>(*handle_, mg_graph_view);
update_edge_src_property(
*handle_, mg_graph_view, inclusiong_flags.begin(), src_inclusion_cache);
update_edge_dst_property(
*handle_, mg_graph_view, inclusiong_flags.begin(), dst_inclusion_cache);
}
per_v_transform_reduce_outgoing_e(
*handle_,
mg_graph_view,
multi_gpu ? src_inclusion_cache.view()
: cugraph::detail::edge_major_property_view_t<vertex_t, vertex_t const*>(
inclusiong_flags.data()),
multi_gpu ? dst_inclusion_cache.view()
: cugraph::detail::edge_minor_property_view_t<vertex_t, vertex_t const*>(
inclusiong_flags.data(), vertex_t{0}),
cugraph::edge_dummy_property_t{}.view(),
[] __device__(auto src, auto dst, auto src_included, auto dst_included, auto wt) {
return (src == dst) ? 0 : dst_included;
},
vertex_t{0},
cugraph::reduce_op::plus<vertex_t>{},
d_total_outgoing_nbrs_included_mis.begin());
RAFT_CUDA_TRY(hipDeviceSynchronize());
std::vector<vertex_t> h_total_outgoing_nbrs_included_mis(
d_total_outgoing_nbrs_included_mis.size());
raft::update_host(h_total_outgoing_nbrs_included_mis.data(),
d_total_outgoing_nbrs_included_mis.data(),
d_total_outgoing_nbrs_included_mis.size(),
handle_->get_stream());
RAFT_CUDA_TRY(hipDeviceSynchronize());
{
auto vertex_first = mg_graph_view.local_vertex_partition_range_first();
auto vertex_last = mg_graph_view.local_vertex_partition_range_last();
std::for_each(h_mis.begin(),
h_mis.end(),
[vertex_first, vertex_last, &h_total_outgoing_nbrs_included_mis](vertex_t v) {
                        ASSERT_TRUE((v >= vertex_first) && (v < vertex_last))
                          << v << " is not within vertex partition range" << std::endl;
ASSERT_TRUE(h_total_outgoing_nbrs_included_mis[v - vertex_first] == 0)
<< v << "'s neighbor is included in MIS" << std::endl;
});
}
}
}
private:
static std::unique_ptr<raft::handle_t> handle_;
};
template <typename input_usecase_t>
std::unique_ptr<raft::handle_t> Tests_MGMaximalIndependentSet<input_usecase_t>::handle_ = nullptr;
using Tests_MGMaximalIndependentSet_File =
Tests_MGMaximalIndependentSet<cugraph::test::File_Usecase>;
using Tests_MGMaximalIndependentSet_Rmat =
Tests_MGMaximalIndependentSet<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MGMaximalIndependentSet_File, CheckInt32Int32FloatFloat)
{
run_current_test<int32_t, int32_t, float, int>(
override_File_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_File, CheckInt32Int64FloatFloat)
{
run_current_test<int32_t, int64_t, float, int>(
override_File_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_File, CheckInt64Int64FloatFloat)
{
run_current_test<int64_t, int64_t, float, int>(
override_File_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_Rmat, CheckInt32Int32FloatFloat)
{
run_current_test<int32_t, int32_t, float, int>(
override_Rmat_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_Rmat, CheckInt32Int64FloatFloat)
{
run_current_test<int32_t, int64_t, float, int>(
override_Rmat_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_Rmat, CheckInt64Int64FloatFloat)
{
run_current_test<int64_t, int64_t, float, int>(
override_Rmat_Usecase_with_cmd_line_arguments(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MGMaximalIndependentSet_File,
::testing::Combine(::testing::Values(MaximalIndependentSet_Usecase{false},
MaximalIndependentSet_Usecase{false}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"))));
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MGMaximalIndependentSet_Rmat,
::testing::Combine(::testing::Values(MaximalIndependentSet_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
3, 4, 0.57, 0.19, 0.19, 0, true, false))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MGMaximalIndependentSet_Rmat,
::testing::Combine(
::testing::Values(MaximalIndependentSet_Usecase{false}, MaximalIndependentSet_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
| 4652f4e6f26406cefb3ce123dd4c9360695455ed.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <prims/property_generator.cuh>
#include <prims/fill_edge_src_dst_property.cuh>
#include <prims/per_v_transform_reduce_incoming_outgoing_e.cuh>
#include <prims/reduce_op.cuh>
#include <prims/update_edge_src_dst_property.cuh>
#include <cugraph/edge_property.hpp>
#include <cugraph/edge_src_dst_property.hpp>
#include <cugraph/graph_functions.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/host_scalar_comm.hpp>
#include <utilities/base_fixture.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/edge_partition_view.hpp>
#include <cugraph/edge_src_dst_property.hpp>
#include <cugraph/utilities/dataframe_buffer.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <raft/random/rng_state.hpp>
#include <chrono>
#include <iostream>
#include <random>
#include <gtest/gtest.h>
struct MaximalIndependentSet_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MGMaximalIndependentSet
: public ::testing::TestWithParam<std::tuple<MaximalIndependentSet_Usecase, input_usecase_t>> {
public:
Tests_MGMaximalIndependentSet() {}
static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); }
static void TearDownTestCase() { handle_.reset(); }
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void run_current_test(std::tuple<MaximalIndependentSet_Usecase, input_usecase_t> const& param)
{
auto [mis_usecase, input_usecase] = param;
auto const comm_rank = handle_->get_comms().get_rank();
auto const comm_size = handle_->get_comms().get_size();
HighResTimer hr_timer{};
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize());
handle_->get_comms().barrier();
hr_timer.start("MG Construct graph");
}
constexpr bool multi_gpu = true;
auto [mg_graph, mg_edge_weights, mg_renumber_map] =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, false, multi_gpu>(
*handle_, input_usecase, false, true);
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize());
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
auto mg_graph_view = mg_graph.view();
auto mg_edge_weight_view =
mg_edge_weights ? std::make_optional((*mg_edge_weights).view()) : std::nullopt;
raft::random::RngState rng_state(multi_gpu ? handle_->get_comms().get_rank() : 0);
auto d_mis = cugraph::maximal_independent_set<vertex_t, edge_t, multi_gpu>(
*handle_, mg_graph_view, rng_state);
// Test MIS
if (mis_usecase.check_correctness) {
RAFT_CUDA_TRY(cudaDeviceSynchronize());
std::vector<vertex_t> h_mis(d_mis.size());
raft::update_host(h_mis.data(), d_mis.data(), d_mis.size(), handle_->get_stream());
RAFT_CUDA_TRY(cudaDeviceSynchronize());
auto vertex_first = mg_graph_view.local_vertex_partition_range_first();
auto vertex_last = mg_graph_view.local_vertex_partition_range_last();
std::for_each(h_mis.begin(), h_mis.end(), [vertex_first, vertex_last](vertex_t v) {
ASSERT_TRUE((v >= vertex_first) && (v < vertex_last));
});
      // If a vertex is included in MIS, then none of its neighbors should be
vertex_t local_vtx_partitoin_size = mg_graph_view.local_vertex_partition_range_size();
rmm::device_uvector<vertex_t> d_total_outgoing_nbrs_included_mis(local_vtx_partitoin_size,
handle_->get_stream());
rmm::device_uvector<vertex_t> inclusiong_flags(local_vtx_partitoin_size,
handle_->get_stream());
thrust::uninitialized_fill(handle_->get_thrust_policy(),
inclusiong_flags.begin(),
inclusiong_flags.end(),
vertex_t{0});
thrust::for_each(
handle_->get_thrust_policy(),
d_mis.begin(),
d_mis.end(),
[inclusiong_flags =
raft::device_span<vertex_t>(inclusiong_flags.data(), inclusiong_flags.size()),
v_first = mg_graph_view.local_vertex_partition_range_first()] __device__(auto v) {
auto v_offset = v - v_first;
inclusiong_flags[v_offset] = vertex_t{1};
});
RAFT_CUDA_TRY(cudaDeviceSynchronize());
// Cache for inclusiong_flags
using GraphViewType = cugraph::graph_view_t<vertex_t, edge_t, false, true>;
cugraph::edge_src_property_t<GraphViewType, vertex_t> src_inclusion_cache(*handle_);
cugraph::edge_dst_property_t<GraphViewType, vertex_t> dst_inclusion_cache(*handle_);
if constexpr (multi_gpu) {
src_inclusion_cache =
cugraph::edge_src_property_t<GraphViewType, vertex_t>(*handle_, mg_graph_view);
dst_inclusion_cache =
cugraph::edge_dst_property_t<GraphViewType, vertex_t>(*handle_, mg_graph_view);
update_edge_src_property(
*handle_, mg_graph_view, inclusiong_flags.begin(), src_inclusion_cache);
update_edge_dst_property(
*handle_, mg_graph_view, inclusiong_flags.begin(), dst_inclusion_cache);
}
per_v_transform_reduce_outgoing_e(
*handle_,
mg_graph_view,
multi_gpu ? src_inclusion_cache.view()
: cugraph::detail::edge_major_property_view_t<vertex_t, vertex_t const*>(
inclusiong_flags.data()),
multi_gpu ? dst_inclusion_cache.view()
: cugraph::detail::edge_minor_property_view_t<vertex_t, vertex_t const*>(
inclusiong_flags.data(), vertex_t{0}),
cugraph::edge_dummy_property_t{}.view(),
[] __device__(auto src, auto dst, auto src_included, auto dst_included, auto wt) {
return (src == dst) ? 0 : dst_included;
},
vertex_t{0},
cugraph::reduce_op::plus<vertex_t>{},
d_total_outgoing_nbrs_included_mis.begin());
RAFT_CUDA_TRY(cudaDeviceSynchronize());
std::vector<vertex_t> h_total_outgoing_nbrs_included_mis(
d_total_outgoing_nbrs_included_mis.size());
raft::update_host(h_total_outgoing_nbrs_included_mis.data(),
d_total_outgoing_nbrs_included_mis.data(),
d_total_outgoing_nbrs_included_mis.size(),
handle_->get_stream());
RAFT_CUDA_TRY(cudaDeviceSynchronize());
{
auto vertex_first = mg_graph_view.local_vertex_partition_range_first();
auto vertex_last = mg_graph_view.local_vertex_partition_range_last();
std::for_each(h_mis.begin(),
h_mis.end(),
[vertex_first, vertex_last, &h_total_outgoing_nbrs_included_mis](vertex_t v) {
                        ASSERT_TRUE((v >= vertex_first) && (v < vertex_last))
                          << v << " is not within vertex partition range" << std::endl;
ASSERT_TRUE(h_total_outgoing_nbrs_included_mis[v - vertex_first] == 0)
<< v << "'s neighbor is included in MIS" << std::endl;
});
}
}
}
private:
static std::unique_ptr<raft::handle_t> handle_;
};
template <typename input_usecase_t>
std::unique_ptr<raft::handle_t> Tests_MGMaximalIndependentSet<input_usecase_t>::handle_ = nullptr;
using Tests_MGMaximalIndependentSet_File =
Tests_MGMaximalIndependentSet<cugraph::test::File_Usecase>;
using Tests_MGMaximalIndependentSet_Rmat =
Tests_MGMaximalIndependentSet<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MGMaximalIndependentSet_File, CheckInt32Int32FloatFloat)
{
run_current_test<int32_t, int32_t, float, int>(
override_File_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_File, CheckInt32Int64FloatFloat)
{
run_current_test<int32_t, int64_t, float, int>(
override_File_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_File, CheckInt64Int64FloatFloat)
{
run_current_test<int64_t, int64_t, float, int>(
override_File_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_Rmat, CheckInt32Int32FloatFloat)
{
run_current_test<int32_t, int32_t, float, int>(
override_Rmat_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_Rmat, CheckInt32Int64FloatFloat)
{
run_current_test<int32_t, int64_t, float, int>(
override_Rmat_Usecase_with_cmd_line_arguments(GetParam()));
}
TEST_P(Tests_MGMaximalIndependentSet_Rmat, CheckInt64Int64FloatFloat)
{
run_current_test<int64_t, int64_t, float, int>(
override_Rmat_Usecase_with_cmd_line_arguments(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MGMaximalIndependentSet_File,
::testing::Combine(::testing::Values(MaximalIndependentSet_Usecase{false},
MaximalIndependentSet_Usecase{false}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"))));
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MGMaximalIndependentSet_Rmat,
::testing::Combine(::testing::Values(MaximalIndependentSet_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
3, 4, 0.57, 0.19, 0.19, 0, true, false))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MGMaximalIndependentSet_Rmat,
::testing::Combine(
::testing::Values(MaximalIndependentSet_Usecase{false}, MaximalIndependentSet_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
|
968c0b782fe2d7616955f1fcd838d57dc3bbfdf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __SSE__
#warning SSE is available
typedef double v2df __attribute__((vector_size(16)));
#endif
#ifdef __AVX__
#warning AVX is available
typedef double v4df __attribute__((vector_size(32)));
#endif
#ifdef __AVX2__
#warning AVX2 is available
#endif
#include <cstdio>
#include "vector3.h"
#define CUDA_TITAN
#include "hermite4.h"
// #include "hermite4-titan.h"
#include "cuda-common.hu"
__device__ __forceinline__ void predict_one(
const double tsys,
const Gravity::GParticle &p,
Gravity::GPredictor &pr)
{
const double dt = tsys - p.tlast;
const double dt2 = (1./2.) * dt;
const double dt3 = (1./3.) * dt;
double3 pos, vel;
pos.x =
p.pos.x + dt *(
p.vel.x + dt2*(
p.acc.x + dt3*(
p.jrk.x )));
pos.y =
p.pos.y + dt *(
p.vel.y + dt2*(
p.acc.y + dt3*(
p.jrk.y )));
pos.z =
p.pos.z + dt *(
p.vel.z + dt2*(
p.acc.z + dt3*(
p.jrk.z )));
vel.x =
p.vel.x + dt *(
p.acc.x + dt2*(
p.jrk.x ));
vel.y =
p.vel.y + dt *(
p.acc.y + dt2*(
p.jrk.y ));
vel.z =
p.vel.z + dt *(
p.acc.z + dt2*(
p.jrk.z ));
pr.pos = pos;
pr.mass = p.mass;
pr.vel = vel;
}
#if 0 // naive version
__global__ void predict_kernel(
const int nbody,
const Gravity::GParticle *ptcl,
Gravity::GPredictor *pred,
const double tsys)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nbody){
Gravity::GParticle p = ptcl[tid];
Gravity::GPredictor &pr = pred[tid];
predict_one(tsys, p, pr);
}
}
#else // specialized for 32 threads
// 14N DP -> 7N DP
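// A block of 32 threads stages 32 GParticles (14 doubles each) into shared
// memory with vectorized double2 copies, computes the predictors in registers,
// packs them into the same shared buffer (7 doubles each), and writes them
// back to global memory in a coalesced copy.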
__global__ void predict_kernel(
const int nbody,
const Gravity::GParticle *ptcl,
Gravity::GPredictor *pred,
const double tsys)
{
const int tid = threadIdx.x;
const int off = blockDim.x * blockIdx.x;
__shared__ Gravity::GParticle pshare[32];
Gravity::GPredictor *prbuf = (Gravity::GPredictor *)pshare;
static_memcpy<double2, 32*7, 32> (pshare, ptcl+off);
Gravity::GPredictor pr;
predict_one(tsys, pshare[tid], pr);
prbuf[tid] = pr;
static_memcpy<double, 32*7, 32> (pred+off, prbuf);
}
#endif
void Gravity::predict_all(const double tsys){
ptcl.htod(njpsend);
// printf("sent %d stars\n", njpsend);
const int ntpred = 32;
const int nblock = (nbody/ntpred) +
((nbody%ntpred) ? 1 : 0);
hipLaunchKernelGGL(( predict_kernel) , dim3(nblock), dim3(ntpred), 0, 0,
nbody, ptcl, pred, tsys);
// pred.dtoh(); // THIS DEBUGGING LINE WAS THE BOTTLENECK
// puts("pred all done");
hipDeviceSynchronize(); // for profiling
}
enum{
NJBLOCK = Gravity::NJBLOCK,
};
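// pp_interact(): accumulate the softened gravitational acceleration and jerk
// exerted by particle j on particle i, with dr = r_j - r_i, dv = v_j - v_i
// and r2 = |dr|^2 + eps^2:
//   acc += m_j * dr / r2^(3/2)
//   jrk += m_j * (dv - 3*(dr.dv)/r2 * dr) / r2^(3/2)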
__device__ __forceinline__ void pp_interact(
const Gravity::GPredictor &ipred,
const Gravity::GPredictor &jpred,
const double eps2,
double3 &acc,
double3 &jrk)
{
const double dx = jpred.pos.x - ipred.pos.x;
const double dy = jpred.pos.y - ipred.pos.y;
const double dz = jpred.pos.z - ipred.pos.z;
const double dvx = jpred.vel.x - ipred.vel.x;
const double dvy = jpred.vel.y - ipred.vel.y;
const double dvz = jpred.vel.z - ipred.vel.z;
const double mj = jpred.mass;
const double dr2 = eps2 + dx*dx + dy*dy + dz*dz;
const double drdv = dx*dvx + dy*dvy + dz*dvz;
// const double rinv1 = rsqrt(dr2);
const double rinv1 = rsqrt_x3(dr2);
const double rinv2 = rinv1 * rinv1;
const double mrinv3 = mj * rinv1 * rinv2;
double alpha = drdv * rinv2;
alpha *= -3.0;
acc.x += mrinv3 * dx;
acc.y += mrinv3 * dy;
acc.z += mrinv3 * dz;
jrk.x += mrinv3 * (dvx + alpha * dx);
jrk.y += mrinv3 * (dvy + alpha * dy);
jrk.z += mrinv3 * (dvz + alpha * dz);
}
#if 0 // first version
__global__ void force_kernel(
const int is,
const int ie,
const int nj,
const Gravity::GPredictor *pred,
const double eps2,
Gravity::GForce (*fo)[NJBLOCK])
{
const int xid = threadIdx.x + blockDim.x * blockIdx.x;
const int yid = blockIdx.y;
const int js = ((0 + yid) * nj) / NJBLOCK;
const int je = ((1 + yid) * nj) / NJBLOCK;
const int i = is + xid;
if(i < ie){
const Gravity::GPredictor ipred = pred[i];
double3 acc = make_double3(0.0, 0.0, 0.0);
double3 jrk = make_double3(0.0, 0.0, 0.0);
#pragma unroll 4
for(int j=js; j<je; j++){
const Gravity::GPredictor &jpred = pred[j];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
fo[xid][yid].acc = acc;
fo[xid][yid].jrk = jrk;
}
}
#else
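// Tiled version: the j-loop is processed in chunks of 8 predictors that the
// whole thread block stages into shared memory (56 doubles per tile), so each
// j-particle is read from global memory once per block instead of once per
// thread.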
__global__ void force_kernel(
const int is,
const int ie,
const int nj,
const Gravity::GPredictor *pred,
const double eps2,
Gravity::GForce (*fo)[NJBLOCK])
{
// const int tid = threadIdx.x;
const int xid = threadIdx.x + blockDim.x * blockIdx.x;
const int yid = blockIdx.y;
const int js = ((0 + yid) * nj) / NJBLOCK;
const int je = ((1 + yid) * nj) / NJBLOCK;
const int je8 = js + 8*((je-js)/8);
const int i = is + xid;
__shared__ Gravity::GPredictor jpsh[8];
const Gravity::GPredictor ipred = pred[i];
double3 acc = make_double3(0.0, 0.0, 0.0);
double3 jrk = make_double3(0.0, 0.0, 0.0);
for(int j=js; j<je8; j+=8){
__syncthreads();
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh, pred + j);
// 56 = sizeof(jpsh)/sizeof(double)
__syncthreads();
#pragma unroll
for(int jj=0; jj<8; jj++){
// const Gravity::GPredictor &jpred = pred[j+jj];
const Gravity::GPredictor &jpred = jpsh[jj];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
}
__syncthreads();
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh, pred + je8);
__syncthreads();
for(int j=je8; j<je; j++){
// const Gravity::GPredictor &jpred = pred[j];
const Gravity::GPredictor &jpred = jpsh[j - je8];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
if(i < ie){
fo[xid][yid].acc = acc;
fo[xid][yid].jrk = jrk;
}
}
#endif
#if 0 // was slower
enum{
NXTH = 32,
NYTH = 4,
};
__global__ void force_kernel_warp(
const int is,
const int ie,
const int nj,
const Gravity::GPredictor *pred,
const double eps2,
Gravity::GForce (*fo)[NJBLOCK])
{
const int tid = threadIdx.x;
const int uid = threadIdx.y;
const int xid = threadIdx.x + blockDim.x * blockIdx.x;
const int yid = threadIdx.y + blockDim.y * blockIdx.y;
const int js = ((0 + yid) * nj) / (NJBLOCK*NYTH);
const int je = ((1 + yid) * nj) / (NJBLOCK*NYTH);
const int je8 = js + 8*((je-js)/8);
const int i = is + xid;
__shared__ Gravity::GPredictor jpsh[NYTH][8];
const Gravity::GPredictor ipred = pred[i];
double3 acc = make_double3(0.0, 0.0, 0.0);
double3 jrk = make_double3(0.0, 0.0, 0.0);
for(int j=js; j<je8; j+=8){
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh[uid], pred + j);
// 56 = sizeof(jpsh)/sizeof(double)
#pragma unroll
for(int jj=0; jj<8; jj++){
// const Gravity::GPredictor &jpred = pred[j+jj];
const Gravity::GPredictor &jpred = jpsh[uid][jj];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
}
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh[uid], pred + je8);
for(int j=je8; j<je; j++){
// const Gravity::GPredictor &jpred = pred[j];
const Gravity::GPredictor &jpred = jpsh[uid][j - je8];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
acc.x = vreduce<NXTH, NYTH> (acc.x, jpsh);
acc.y = vreduce<NXTH, NYTH> (acc.y, jpsh);
acc.z = vreduce<NXTH, NYTH> (acc.z, jpsh);
jrk.x = vreduce<NXTH, NYTH> (jrk.x, jpsh);
jrk.y = vreduce<NXTH, NYTH> (jrk.y, jpsh);
jrk.z = vreduce<NXTH, NYTH> (jrk.z, jpsh);
if(i < ie && 0==uid){
fo[xid][yid].acc = acc;
fo[xid][yid].jrk = jrk;
}
}
#endif
template<>
__device__ void reduce_final<1, 6>(const double x, double *dst){
const int yid = threadIdx.y;
dst[yid] = x;
}
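// reduce_kernel: one block per i-particle; thread (x, y) loads word y of the
// partial force produced by j-block x (lanes with x >= NJBLOCK contribute
// zero), then a warp-level reduction followed by reduce_final() sums the
// NJBLOCK partial forces into the single GForce ftot[bid].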
__global__ void reduce_kernel(
const Gravity::GForce (*fpart)[NJBLOCK],
Gravity::GForce *ftot)
{
const int bid = blockIdx.x; // for particle
const int xid = threadIdx.x; // for 56 partial force
const int yid = threadIdx.y; // for 6 elements of Force
const Gravity::GForce &fsrc = fpart[bid][xid];
const double *dsrc = (const double *)(&fsrc);
const double x = xid<NJBLOCK ? dsrc[yid] : 0.0;
const double y = warp_reduce_double(x);
Gravity::GForce &fdst = ftot[bid];
double *ddst = (double *)(&fdst);
reduce_final<Gravity::NJREDUCE/32, 6> (y, ddst);
}
void Gravity::calc_force_in_range(
const int is,
const int ie,
const double eps2,
Force force[] )
{
assert(56 == sizeof(GPredictor));
const int ni = ie - is;
{
const int niblock = (ni/NTHREAD) +
((ni%NTHREAD) ? 1 : 0);
dim3 grid(niblock, NJBLOCK, 1);
hipLaunchKernelGGL(( force_kernel) , dim3(grid), dim3(NTHREAD), 0, 0,
is, ie, nbody, pred, eps2, fpart);
}
{
// const int nwarp = 32;
const int nword = sizeof(GForce) / sizeof(double);
assert(6 == nword);
hipLaunchKernelGGL(( reduce_kernel) , dim3(ni), dim3(dim3(NJREDUCE, nword, 1)), 0, 0,
fpart, ftot);
}
ftot.dtoh(ni);
for(int i=0; i<ni; i++){
GForce f = ftot[i];
force[is+i].acc = f.acc;
force[is+i].jrk = f.jrk;
}
}
// optimization for overlapping
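// The i-particles are processed in chunks of NIMAX. For each chunk the
// partial-force kernel is launched first; while it runs on the device, the
// host stores the forces of the previous chunk (already copied into ftot),
// and only then are the reduction kernel and the device-to-host copy for the
// current chunk issued. The last chunk is stored after the loop.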
void Gravity::calc_force_on_first_nact(
const int nact,
const double eps2,
Force force[] )
{
int istore = 0;
int nistore = 0;
for(int ii=0; ii<nact; ii+=NIMAX){
const int ni = (nact-ii) < NIMAX ? (nact-ii) : NIMAX;
// calc_force_in_range(ii, ii+ni, eps2, force);
{ // partial force calculation
const int is = ii;
const int ie = is + ni;
#if 1
const int niblock = (ni/NTHREAD) +
((ni%NTHREAD) ? 1 : 0);
dim3 grid(niblock, NJBLOCK, 1);
hipLaunchKernelGGL(( force_kernel) , dim3(grid), dim3(NTHREAD), 0, 0,
is, ie, nbody, pred, eps2, fpart);
#else
const int niblock = (ni/32) +
((ni%32) ? 1 : 0);
dim3 grid(niblock, NJBLOCK, 1);
dim3 thread(NXTH, NYTH, 1);
hipLaunchKernelGGL(( force_kernel_warp) , dim3(grid), dim3(thread), 0, 0,
is, ie, nbody, pred, eps2, fpart);
#endif
}
for(int i=0; i<nistore; i++){
GForce f = ftot[i];
force[istore+i].acc = f.acc;
force[istore+i].jrk = f.jrk;
}
{ // reduction
const int nword = sizeof(GForce) / sizeof(double);
assert(6 == nword);
hipLaunchKernelGGL(( reduce_kernel) , dim3(ni), dim3(dim3(NJREDUCE, nword, 1)), 0, 0,
fpart, ftot);
}
ftot.dtoh(ni);
istore = ii;
nistore = ni;
}
for(int i=0; i<nistore; i++){
GForce f = ftot[i];
force[istore+i].acc = f.acc;
force[istore+i].jrk = f.jrk;
}
this->njpsend = nact;
}
#include "pot-titan.hu"
| 968c0b782fe2d7616955f1fcd838d57dc3bbfdf7.cu | #ifdef __SSE__
#warning SSE is available
typedef double v2df __attribute__((vector_size(16)));
#endif
#ifdef __AVX__
#warning AVX is available
typedef double v4df __attribute__((vector_size(32)));
#endif
#ifdef __AVX2__
#warning AVX2 is available
#endif
#include <cstdio>
#include "vector3.h"
#define CUDA_TITAN
#include "hermite4.h"
// #include "hermite4-titan.h"
#include "cuda-common.hu"
__device__ __forceinline__ void predict_one(
const double tsys,
const Gravity::GParticle &p,
Gravity::GPredictor &pr)
{
const double dt = tsys - p.tlast;
const double dt2 = (1./2.) * dt;
const double dt3 = (1./3.) * dt;
double3 pos, vel;
pos.x =
p.pos.x + dt *(
p.vel.x + dt2*(
p.acc.x + dt3*(
p.jrk.x )));
pos.y =
p.pos.y + dt *(
p.vel.y + dt2*(
p.acc.y + dt3*(
p.jrk.y )));
pos.z =
p.pos.z + dt *(
p.vel.z + dt2*(
p.acc.z + dt3*(
p.jrk.z )));
vel.x =
p.vel.x + dt *(
p.acc.x + dt2*(
p.jrk.x ));
vel.y =
p.vel.y + dt *(
p.acc.y + dt2*(
p.jrk.y ));
vel.z =
p.vel.z + dt *(
p.acc.z + dt2*(
p.jrk.z ));
pr.pos = pos;
pr.mass = p.mass;
pr.vel = vel;
}
#if 0 // naive version
__global__ void predict_kernel(
const int nbody,
const Gravity::GParticle *ptcl,
Gravity::GPredictor *pred,
const double tsys)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nbody){
Gravity::GParticle p = ptcl[tid];
Gravity::GPredictor &pr = pred[tid];
predict_one(tsys, p, pr);
}
}
#else // specialized for 32 threads
// 14N DP -> 7N DP
__global__ void predict_kernel(
const int nbody,
const Gravity::GParticle *ptcl,
Gravity::GPredictor *pred,
const double tsys)
{
const int tid = threadIdx.x;
const int off = blockDim.x * blockIdx.x;
__shared__ Gravity::GParticle pshare[32];
Gravity::GPredictor *prbuf = (Gravity::GPredictor *)pshare;
static_memcpy<double2, 32*7, 32> (pshare, ptcl+off);
Gravity::GPredictor pr;
predict_one(tsys, pshare[tid], pr);
prbuf[tid] = pr;
static_memcpy<double, 32*7, 32> (pred+off, prbuf);
}
#endif
void Gravity::predict_all(const double tsys){
ptcl.htod(njpsend);
// printf("sent %d stars\n", njpsend);
const int ntpred = 32;
const int nblock = (nbody/ntpred) +
((nbody%ntpred) ? 1 : 0);
predict_kernel <<<nblock, ntpred>>>
(nbody, ptcl, pred, tsys);
// pred.dtoh(); // THIS DEBUGGING LINE WAS THE BOTTLENECK
// puts("pred all done");
cudaThreadSynchronize(); // for profiling
}
enum{
NJBLOCK = Gravity::NJBLOCK,
};
__device__ __forceinline__ void pp_interact(
const Gravity::GPredictor &ipred,
const Gravity::GPredictor &jpred,
const double eps2,
double3 &acc,
double3 &jrk)
{
const double dx = jpred.pos.x - ipred.pos.x;
const double dy = jpred.pos.y - ipred.pos.y;
const double dz = jpred.pos.z - ipred.pos.z;
const double dvx = jpred.vel.x - ipred.vel.x;
const double dvy = jpred.vel.y - ipred.vel.y;
const double dvz = jpred.vel.z - ipred.vel.z;
const double mj = jpred.mass;
const double dr2 = eps2 + dx*dx + dy*dy + dz*dz;
const double drdv = dx*dvx + dy*dvy + dz*dvz;
// const double rinv1 = rsqrt(dr2);
const double rinv1 = rsqrt_x3(dr2);
const double rinv2 = rinv1 * rinv1;
const double mrinv3 = mj * rinv1 * rinv2;
double alpha = drdv * rinv2;
alpha *= -3.0;
acc.x += mrinv3 * dx;
acc.y += mrinv3 * dy;
acc.z += mrinv3 * dz;
jrk.x += mrinv3 * (dvx + alpha * dx);
jrk.y += mrinv3 * (dvy + alpha * dy);
jrk.z += mrinv3 * (dvz + alpha * dz);
}
#if 0 // first version
__global__ void force_kernel(
const int is,
const int ie,
const int nj,
const Gravity::GPredictor *pred,
const double eps2,
Gravity::GForce (*fo)[NJBLOCK])
{
const int xid = threadIdx.x + blockDim.x * blockIdx.x;
const int yid = blockIdx.y;
const int js = ((0 + yid) * nj) / NJBLOCK;
const int je = ((1 + yid) * nj) / NJBLOCK;
const int i = is + xid;
if(i < ie){
const Gravity::GPredictor ipred = pred[i];
double3 acc = make_double3(0.0, 0.0, 0.0);
double3 jrk = make_double3(0.0, 0.0, 0.0);
#pragma unroll 4
for(int j=js; j<je; j++){
const Gravity::GPredictor &jpred = pred[j];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
fo[xid][yid].acc = acc;
fo[xid][yid].jrk = jrk;
}
}
#else
__global__ void force_kernel(
const int is,
const int ie,
const int nj,
const Gravity::GPredictor *pred,
const double eps2,
Gravity::GForce (*fo)[NJBLOCK])
{
// const int tid = threadIdx.x;
const int xid = threadIdx.x + blockDim.x * blockIdx.x;
const int yid = blockIdx.y;
const int js = ((0 + yid) * nj) / NJBLOCK;
const int je = ((1 + yid) * nj) / NJBLOCK;
const int je8 = js + 8*((je-js)/8);
const int i = is + xid;
__shared__ Gravity::GPredictor jpsh[8];
const Gravity::GPredictor ipred = pred[i];
double3 acc = make_double3(0.0, 0.0, 0.0);
double3 jrk = make_double3(0.0, 0.0, 0.0);
for(int j=js; j<je8; j+=8){
__syncthreads();
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh, pred + j);
// 56 = sizeof(jpsh)/sizeof(double)
__syncthreads();
#pragma unroll
for(int jj=0; jj<8; jj++){
// const Gravity::GPredictor &jpred = pred[j+jj];
const Gravity::GPredictor &jpred = jpsh[jj];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
}
__syncthreads();
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh, pred + je8);
__syncthreads();
for(int j=je8; j<je; j++){
// const Gravity::GPredictor &jpred = pred[j];
const Gravity::GPredictor &jpred = jpsh[j - je8];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
if(i < ie){
fo[xid][yid].acc = acc;
fo[xid][yid].jrk = jrk;
}
}
#endif
#if 0 // was slower
enum{
NXTH = 32,
NYTH = 4,
};
__global__ void force_kernel_warp(
const int is,
const int ie,
const int nj,
const Gravity::GPredictor *pred,
const double eps2,
Gravity::GForce (*fo)[NJBLOCK])
{
const int tid = threadIdx.x;
const int uid = threadIdx.y;
const int xid = threadIdx.x + blockDim.x * blockIdx.x;
const int yid = threadIdx.y + blockDim.y * blockIdx.y;
const int js = ((0 + yid) * nj) / (NJBLOCK*NYTH);
const int je = ((1 + yid) * nj) / (NJBLOCK*NYTH);
const int je8 = js + 8*((je-js)/8);
const int i = is + xid;
__shared__ Gravity::GPredictor jpsh[NYTH][8];
const Gravity::GPredictor ipred = pred[i];
double3 acc = make_double3(0.0, 0.0, 0.0);
double3 jrk = make_double3(0.0, 0.0, 0.0);
for(int j=js; j<je8; j+=8){
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh[uid], pred + j);
// 56 = sizeof(jpsh)/sizeof(double)
#pragma unroll
for(int jj=0; jj<8; jj++){
// const Gravity::GPredictor &jpred = pred[j+jj];
const Gravity::GPredictor &jpred = jpsh[uid][jj];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
}
static_memcpy<double, 56, Gravity::NTHREAD> (jpsh[uid], pred + je8);
for(int j=je8; j<je; j++){
// const Gravity::GPredictor &jpred = pred[j];
const Gravity::GPredictor &jpred = jpsh[uid][j - je8];
pp_interact(ipred, jpred, eps2, acc, jrk);
}
acc.x = vreduce<NXTH, NYTH> (acc.x, jpsh);
acc.y = vreduce<NXTH, NYTH> (acc.y, jpsh);
acc.z = vreduce<NXTH, NYTH> (acc.z, jpsh);
jrk.x = vreduce<NXTH, NYTH> (jrk.x, jpsh);
jrk.y = vreduce<NXTH, NYTH> (jrk.y, jpsh);
jrk.z = vreduce<NXTH, NYTH> (jrk.z, jpsh);
if(i < ie && 0==uid){
fo[xid][yid].acc = acc;
fo[xid][yid].jrk = jrk;
}
}
#endif
template<>
__device__ void reduce_final<1, 6>(const double x, double *dst){
const int yid = threadIdx.y;
dst[yid] = x;
}
__global__ void reduce_kernel(
const Gravity::GForce (*fpart)[NJBLOCK],
Gravity::GForce *ftot)
{
const int bid = blockIdx.x; // for particle
const int xid = threadIdx.x; // for 56 partial force
const int yid = threadIdx.y; // for 6 elements of Force
const Gravity::GForce &fsrc = fpart[bid][xid];
const double *dsrc = (const double *)(&fsrc);
const double x = xid<NJBLOCK ? dsrc[yid] : 0.0;
const double y = warp_reduce_double(x);
Gravity::GForce &fdst = ftot[bid];
double *ddst = (double *)(&fdst);
reduce_final<Gravity::NJREDUCE/32, 6> (y, ddst);
}
void Gravity::calc_force_in_range(
const int is,
const int ie,
const double eps2,
Force force[] )
{
assert(56 == sizeof(GPredictor));
const int ni = ie - is;
{
const int niblock = (ni/NTHREAD) +
((ni%NTHREAD) ? 1 : 0);
dim3 grid(niblock, NJBLOCK, 1);
force_kernel <<<grid, NTHREAD>>>
(is, ie, nbody, pred, eps2, fpart);
}
{
// const int nwarp = 32;
const int nword = sizeof(GForce) / sizeof(double);
assert(6 == nword);
reduce_kernel <<<ni, dim3(NJREDUCE, nword, 1)>>>
(fpart, ftot);
}
ftot.dtoh(ni);
for(int i=0; i<ni; i++){
GForce f = ftot[i];
force[is+i].acc = f.acc;
force[is+i].jrk = f.jrk;
}
}
// optimization for overlapping
void Gravity::calc_force_on_first_nact(
const int nact,
const double eps2,
Force force[] )
{
int istore = 0;
int nistore = 0;
for(int ii=0; ii<nact; ii+=NIMAX){
const int ni = (nact-ii) < NIMAX ? (nact-ii) : NIMAX;
// calc_force_in_range(ii, ii+ni, eps2, force);
{ // partial force calculation
const int is = ii;
const int ie = is + ni;
#if 1
const int niblock = (ni/NTHREAD) +
((ni%NTHREAD) ? 1 : 0);
dim3 grid(niblock, NJBLOCK, 1);
force_kernel <<<grid, NTHREAD>>>
(is, ie, nbody, pred, eps2, fpart);
#else
const int niblock = (ni/32) +
((ni%32) ? 1 : 0);
dim3 grid(niblock, NJBLOCK, 1);
dim3 thread(NXTH, NYTH, 1);
force_kernel_warp <<<grid, thread>>>
(is, ie, nbody, pred, eps2, fpart);
#endif
}
for(int i=0; i<nistore; i++){
GForce f = ftot[i];
force[istore+i].acc = f.acc;
force[istore+i].jrk = f.jrk;
}
{ // reduction
const int nword = sizeof(GForce) / sizeof(double);
assert(6 == nword);
reduce_kernel <<<ni, dim3(NJREDUCE, nword, 1)>>>
(fpart, ftot);
}
ftot.dtoh(ni);
istore = ii;
nistore = ni;
}
for(int i=0; i<nistore; i++){
GForce f = ftot[i];
force[istore+i].acc = f.acc;
force[istore+i].jrk = f.jrk;
}
this->njpsend = nact;
}
#include "pot-titan.hu"
|
ea6c957e22d852f89e25dbf3ccceda080b79067d.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_of_float16_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <hip/hip_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
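// Each test below evaluates the same tensor expression twice on the GPU, once
// in float and once through Eigen::half, copies both results back to the host
// and checks that the half-precision path agrees with the float reference
// (exactly for boolean results, approximately otherwise).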
template<typename>
void test_cuda_numext() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
bool* d_res_half = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
bool* d_res_float = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
gpu_res_float.device(gpu_device) = gpu_float.unaryExpr(Eigen::internal::scalar_isnan_op<float>());
gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().unaryExpr(Eigen::internal::scalar_isnan_op<Eigen::half>());
Tensor<bool, 1> half_prec(num_elem);
Tensor<bool, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(bool));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(bool));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking numext " << i << std::endl;
VERIFY_IS_EQUAL(full_prec(i), half_prec(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
#ifdef EIGEN_HAS_CUDA_FP16
template<typename>
void test_cuda_conversion() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_half(
d_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv(
d_conv, num_elem);
gpu_float.device(gpu_device) = gpu_float.random();
gpu_half.device(gpu_device) = gpu_float.cast<Eigen::half>();
gpu_conv.device(gpu_device) = gpu_half.cast<float>();
Tensor<float, 1> initial(num_elem);
Tensor<float, 1> final(num_elem);
gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float));
for (int i = 0; i < num_elem; ++i) {
VERIFY_IS_APPROX(initial(i), final(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_half);
gpu_device.deallocate(d_conv);
}
template<typename>
void test_cuda_unary() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
gpu_res_float.device(gpu_device) = gpu_float.abs();
gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().cast<float>();
Tensor<float, 1> half_prec(num_elem);
Tensor<float, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking unary " << i << std::endl;
VERIFY_IS_APPROX(full_prec(i), half_prec(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_elementwise() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(
d_float1, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(
d_float2, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
gpu_float1.device(gpu_device) = gpu_float1.random();
gpu_float2.device(gpu_device) = gpu_float2.random();
gpu_res_float.device(gpu_device) = (gpu_float1 + gpu_float2) * gpu_float1;
gpu_res_half.device(gpu_device) = ((gpu_float1.cast<Eigen::half>() + gpu_float2.cast<Eigen::half>()) * gpu_float1.cast<Eigen::half>()).cast<float>();
Tensor<float, 1> half_prec(num_elem);
Tensor<float, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise " << i << ": full prec = " << full_prec(i) << " vs half prec = " << half_prec(i) << std::endl;
VERIFY_IS_APPROX(static_cast<Eigen::half>(full_prec(i)), static_cast<Eigen::half>(half_prec(i)));
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_trancendental() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float3 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res1_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res1_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res2_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res2_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res3_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res3_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(d_float1, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(d_float2, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float3(d_float3, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_half(d_res1_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_float(d_res1_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_half(d_res2_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_float(d_res2_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_half(d_res3_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_float(d_res3_float, num_elem);
gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
gpu_float2.device(gpu_device) = gpu_float2.random() + gpu_float1.constant(0.5f);
gpu_float3.device(gpu_device) = gpu_float3.random();
gpu_res1_float.device(gpu_device) = gpu_float1.exp().cast<Eigen::half>();
gpu_res2_float.device(gpu_device) = gpu_float2.log().cast<Eigen::half>();
gpu_res3_float.device(gpu_device) = gpu_float3.log1p().cast<Eigen::half>();
gpu_res1_half.device(gpu_device) = gpu_float1.cast<Eigen::half>();
gpu_res1_half.device(gpu_device) = gpu_res1_half.exp();
gpu_res2_half.device(gpu_device) = gpu_float2.cast<Eigen::half>();
gpu_res2_half.device(gpu_device) = gpu_res2_half.log();
gpu_res3_half.device(gpu_device) = gpu_float3.cast<Eigen::half>();
gpu_res3_half.device(gpu_device) = gpu_res3_half.log1p();
Tensor<float, 1> input1(num_elem);
Tensor<Eigen::half, 1> half_prec1(num_elem);
Tensor<Eigen::half, 1> full_prec1(num_elem);
Tensor<float, 1> input2(num_elem);
Tensor<Eigen::half, 1> half_prec2(num_elem);
Tensor<Eigen::half, 1> full_prec2(num_elem);
Tensor<float, 1> input3(num_elem);
Tensor<Eigen::half, 1> half_prec3(num_elem);
Tensor<Eigen::half, 1> full_prec3(num_elem);
gpu_device.memcpyDeviceToHost(input1.data(), d_float1, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(input2.data(), d_float2, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(input3.data(), d_float3, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res1_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec1.data(), d_res1_float, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res2_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec2.data(), d_res2_float, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(half_prec3.data(), d_res3_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec3.data(), d_res3_float, num_elem*sizeof(Eigen::half));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise exp " << i << " input = " << input1(i) << " full = " << full_prec1(i) << " half = " << half_prec1(i) << std::endl;
VERIFY_IS_APPROX(full_prec1(i), half_prec1(i));
}
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl;
if(std::abs(input2(i)-1.f)<0.05f) // log lacks accuracy near 1
VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f));
else
VERIFY_IS_APPROX(full_prec2(i), half_prec2(i));
}
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise plog1 " << i << " input = " << input3(i) << " full = " << full_prec3(i) << " half = " << half_prec3(i) << std::endl;
VERIFY_IS_APPROX(full_prec3(i), half_prec3(i));
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_float3);
gpu_device.deallocate(d_res1_half);
gpu_device.deallocate(d_res1_float);
gpu_device.deallocate(d_res2_half);
gpu_device.deallocate(d_res2_float);
gpu_device.deallocate(d_res3_float);
gpu_device.deallocate(d_res3_half);
}
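// Half-precision contractions accumulate rounding error: absolute differences
// below 1e-2 are accepted outright, larger ones must still pass the
// approximate-equality check.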
template<typename>
void test_cuda_contractions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int rows = 23;
int cols = 23;
int num_elem = rows*cols;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
d_float1, rows, cols);
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
d_float2, rows, cols);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_half(
d_res_half, rows, cols);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_float(
d_res_float, rows, cols);
gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
gpu_float2.device(gpu_device) = gpu_float2.random() - gpu_float2.constant(0.5f);
typedef Tensor<float, 2>::DimensionPair DimPair;
Eigen::array<DimPair, 1> dims(DimPair(1, 0));
gpu_res_float.device(gpu_device) = gpu_float1.contract(gpu_float2, dims).cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().contract(gpu_float2.cast<Eigen::half>(), dims);
Tensor<Eigen::half, 2> half_prec(rows, cols);
Tensor<Eigen::half, 2> full_prec(rows, cols);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(Eigen::half));
gpu_device.synchronize();
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
std::cout << "Checking contract " << i << " " << j << full_prec(i, j) << " " << half_prec(i, j) << std::endl;
if (numext::abs(full_prec(i, j) - half_prec(i, j)) > Eigen::half(1e-2f)) {
VERIFY_IS_APPROX(full_prec(i, j), half_prec(i, j));
}
}
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_reductions(int size1, int size2, int redux) {
std::cout << "Reducing " << size1 << " by " << size2
<< " tensor along dim " << redux << std::endl;
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = size1*size2;
int result_size = (redux == 1 ? size1 : size2);
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half));
Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
d_float1, size1, size2);
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
d_float2, size1, size2);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, result_size);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, result_size);
gpu_float1.device(gpu_device) = gpu_float1.random() * 2.0f;
gpu_float2.device(gpu_device) = gpu_float2.random() * 2.0f;
Eigen::array<int, 1> redux_dim = {{redux}};
gpu_res_float.device(gpu_device) = gpu_float1.sum(redux_dim).cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum(redux_dim);
Tensor<Eigen::half, 1> half_prec(result_size);
Tensor<Eigen::half, 1> full_prec(result_size);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, result_size*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, result_size*sizeof(Eigen::half));
gpu_device.synchronize();
for (int i = 0; i < result_size; ++i) {
std::cout << "EXPECTED " << full_prec(i) << " GOT " << half_prec(i) << std::endl;
VERIFY_IS_APPROX(full_prec(i), half_prec(i));
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_reductions() {
test_cuda_reductions<void>(13, 13, 0);
test_cuda_reductions<void>(13, 13, 1);
test_cuda_reductions<void>(35, 36, 0);
test_cuda_reductions<void>(35, 36, 1);
test_cuda_reductions<void>(36, 35, 0);
test_cuda_reductions<void>(36, 35, 1);
}
template<typename>
void test_cuda_full_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int size = 13;
int num_elem = size*size;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half));
Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
d_float1, size, size);
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
d_float2, size, size);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_half(
d_res_half);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_float(
d_res_float);
gpu_float1.device(gpu_device) = gpu_float1.random();
gpu_float2.device(gpu_device) = gpu_float2.random();
gpu_res_float.device(gpu_device) = gpu_float1.sum().cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum();
Tensor<Eigen::half, 0> half_prec;
Tensor<Eigen::half, 0> full_prec;
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half));
gpu_device.synchronize();
VERIFY_IS_APPROX(full_prec(), half_prec());
gpu_res_float.device(gpu_device) = gpu_float1.maximum().cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().maximum();
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half));
gpu_device.synchronize();
VERIFY_IS_APPROX(full_prec(), half_prec());
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_forced_evals() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half1(
d_res_half1, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Unaligned> gpu_res_half2(
d_res_half2, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
Eigen::array<int, 1> no_bcast;
no_bcast[0] = 1;
gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
gpu_res_float.device(gpu_device) = gpu_float.abs();
gpu_res_half1.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().eval().cast<float>();
gpu_res_half2.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().broadcast(no_bcast).eval().cast<float>();
Tensor<float, 1> half_prec1(num_elem);
Tensor<float, 1> half_prec2(num_elem);
Tensor<float, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res_half1, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res_half2, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking forced eval " << i << full_prec(i) << " vs " << half_prec1(i) << " vs " << half_prec2(i) << std::endl;
VERIFY_IS_APPROX(full_prec(i), half_prec1(i));
VERIFY_IS_APPROX(full_prec(i), half_prec2(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_res_half1);
gpu_device.deallocate(d_res_half2);
gpu_device.deallocate(d_res_float);
}
#endif
void test_cxx11_tensor_of_float16_cuda()
{
CALL_SUBTEST_1(test_cuda_numext<void>());
#ifdef EIGEN_HAS_CUDA_FP16
CALL_SUBTEST_1(test_cuda_conversion<void>());
CALL_SUBTEST_1(test_cuda_unary<void>());
CALL_SUBTEST_1(test_cuda_elementwise<void>());
CALL_SUBTEST_1(test_cuda_trancendental<void>());
CALL_SUBTEST_2(test_cuda_contractions<void>());
CALL_SUBTEST_3(test_cuda_reductions<void>());
CALL_SUBTEST_4(test_cuda_full_reductions<void>());
CALL_SUBTEST_5(test_cuda_forced_evals<void>());
#else
std::cout << "Half floats are not supported by this version of cuda: skipping the test" << std::endl;
#endif
}
| ea6c957e22d852f89e25dbf3ccceda080b79067d.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_of_float16_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <cuda_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
template<typename>
void test_cuda_numext() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
bool* d_res_half = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
bool* d_res_float = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
gpu_res_float.device(gpu_device) = gpu_float.unaryExpr(Eigen::internal::scalar_isnan_op<float>());
gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().unaryExpr(Eigen::internal::scalar_isnan_op<Eigen::half>());
Tensor<bool, 1> half_prec(num_elem);
Tensor<bool, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(bool));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(bool));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking numext " << i << std::endl;
VERIFY_IS_EQUAL(full_prec(i), half_prec(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
#ifdef EIGEN_HAS_CUDA_FP16
template<typename>
void test_cuda_conversion() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_half(
d_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv(
d_conv, num_elem);
gpu_float.device(gpu_device) = gpu_float.random();
gpu_half.device(gpu_device) = gpu_float.cast<Eigen::half>();
gpu_conv.device(gpu_device) = gpu_half.cast<float>();
Tensor<float, 1> initial(num_elem);
Tensor<float, 1> final(num_elem);
gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float));
for (int i = 0; i < num_elem; ++i) {
VERIFY_IS_APPROX(initial(i), final(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_half);
gpu_device.deallocate(d_conv);
}
template<typename>
void test_cuda_unary() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
gpu_res_float.device(gpu_device) = gpu_float.abs();
gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().cast<float>();
Tensor<float, 1> half_prec(num_elem);
Tensor<float, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking unary " << i << std::endl;
VERIFY_IS_APPROX(full_prec(i), half_prec(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_elementwise() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(
d_float1, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(
d_float2, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
gpu_float1.device(gpu_device) = gpu_float1.random();
gpu_float2.device(gpu_device) = gpu_float2.random();
gpu_res_float.device(gpu_device) = (gpu_float1 + gpu_float2) * gpu_float1;
gpu_res_half.device(gpu_device) = ((gpu_float1.cast<Eigen::half>() + gpu_float2.cast<Eigen::half>()) * gpu_float1.cast<Eigen::half>()).cast<float>();
Tensor<float, 1> half_prec(num_elem);
Tensor<float, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise " << i << ": full prec = " << full_prec(i) << " vs half prec = " << half_prec(i) << std::endl;
VERIFY_IS_APPROX(static_cast<Eigen::half>(full_prec(i)), static_cast<Eigen::half>(half_prec(i)));
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_trancendental() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float3 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res1_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res1_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res2_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res2_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res3_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res3_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(d_float1, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(d_float2, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float3(d_float3, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_half(d_res1_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_float(d_res1_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_half(d_res2_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_float(d_res2_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_half(d_res3_half, num_elem);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_float(d_res3_float, num_elem);
gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
gpu_float2.device(gpu_device) = gpu_float2.random() + gpu_float1.constant(0.5f);
gpu_float3.device(gpu_device) = gpu_float3.random();
gpu_res1_float.device(gpu_device) = gpu_float1.exp().cast<Eigen::half>();
gpu_res2_float.device(gpu_device) = gpu_float2.log().cast<Eigen::half>();
gpu_res3_float.device(gpu_device) = gpu_float3.log1p().cast<Eigen::half>();
gpu_res1_half.device(gpu_device) = gpu_float1.cast<Eigen::half>();
gpu_res1_half.device(gpu_device) = gpu_res1_half.exp();
gpu_res2_half.device(gpu_device) = gpu_float2.cast<Eigen::half>();
gpu_res2_half.device(gpu_device) = gpu_res2_half.log();
gpu_res3_half.device(gpu_device) = gpu_float3.cast<Eigen::half>();
gpu_res3_half.device(gpu_device) = gpu_res3_half.log1p();
Tensor<float, 1> input1(num_elem);
Tensor<Eigen::half, 1> half_prec1(num_elem);
Tensor<Eigen::half, 1> full_prec1(num_elem);
Tensor<float, 1> input2(num_elem);
Tensor<Eigen::half, 1> half_prec2(num_elem);
Tensor<Eigen::half, 1> full_prec2(num_elem);
Tensor<float, 1> input3(num_elem);
Tensor<Eigen::half, 1> half_prec3(num_elem);
Tensor<Eigen::half, 1> full_prec3(num_elem);
gpu_device.memcpyDeviceToHost(input1.data(), d_float1, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(input2.data(), d_float2, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(input3.data(), d_float3, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res1_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec1.data(), d_res1_float, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res2_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec2.data(), d_res2_float, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(half_prec3.data(), d_res3_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec3.data(), d_res3_float, num_elem*sizeof(Eigen::half));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise exp " << i << " input = " << input1(i) << " full = " << full_prec1(i) << " half = " << half_prec1(i) << std::endl;
VERIFY_IS_APPROX(full_prec1(i), half_prec1(i));
}
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl;
if(std::abs(input2(i)-1.f)<0.05f) // log lacks accuracy near 1
VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f));
else
VERIFY_IS_APPROX(full_prec2(i), half_prec2(i));
}
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise plog1 " << i << " input = " << input3(i) << " full = " << full_prec3(i) << " half = " << half_prec3(i) << std::endl;
VERIFY_IS_APPROX(full_prec3(i), half_prec3(i));
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_float3);
gpu_device.deallocate(d_res1_half);
gpu_device.deallocate(d_res1_float);
gpu_device.deallocate(d_res2_half);
gpu_device.deallocate(d_res2_float);
gpu_device.deallocate(d_res3_float);
gpu_device.deallocate(d_res3_half);
}
template<typename>
void test_cuda_contractions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int rows = 23;
int cols = 23;
int num_elem = rows*cols;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
d_float1, rows, cols);
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
d_float2, rows, cols);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_half(
d_res_half, rows, cols);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_float(
d_res_float, rows, cols);
gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
gpu_float2.device(gpu_device) = gpu_float2.random() - gpu_float2.constant(0.5f);
typedef Tensor<float, 2>::DimensionPair DimPair;
Eigen::array<DimPair, 1> dims(DimPair(1, 0));
gpu_res_float.device(gpu_device) = gpu_float1.contract(gpu_float2, dims).cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().contract(gpu_float2.cast<Eigen::half>(), dims);
Tensor<Eigen::half, 2> half_prec(rows, cols);
Tensor<Eigen::half, 2> full_prec(rows, cols);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(Eigen::half));
gpu_device.synchronize();
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
std::cout << "Checking contract " << i << " " << j << full_prec(i, j) << " " << half_prec(i, j) << std::endl;
if (numext::abs(full_prec(i, j) - half_prec(i, j)) > Eigen::half(1e-2f)) {
VERIFY_IS_APPROX(full_prec(i, j), half_prec(i, j));
}
}
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_reductions(int size1, int size2, int redux) {
std::cout << "Reducing " << size1 << " by " << size2
<< " tensor along dim " << redux << std::endl;
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = size1*size2;
int result_size = (redux == 1 ? size1 : size2);
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half));
Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
d_float1, size1, size2);
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
d_float2, size1, size2);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_half(
d_res_half, result_size);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, result_size);
gpu_float1.device(gpu_device) = gpu_float1.random() * 2.0f;
gpu_float2.device(gpu_device) = gpu_float2.random() * 2.0f;
Eigen::array<int, 1> redux_dim = {{redux}};
gpu_res_float.device(gpu_device) = gpu_float1.sum(redux_dim).cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum(redux_dim);
Tensor<Eigen::half, 1> half_prec(result_size);
Tensor<Eigen::half, 1> full_prec(result_size);
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, result_size*sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, result_size*sizeof(Eigen::half));
gpu_device.synchronize();
for (int i = 0; i < result_size; ++i) {
std::cout << "EXPECTED " << full_prec(i) << " GOT " << half_prec(i) << std::endl;
VERIFY_IS_APPROX(full_prec(i), half_prec(i));
}
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_reductions() {
test_cuda_reductions<void>(13, 13, 0);
test_cuda_reductions<void>(13, 13, 1);
test_cuda_reductions<void>(35, 36, 0);
test_cuda_reductions<void>(35, 36, 1);
test_cuda_reductions<void>(36, 35, 0);
test_cuda_reductions<void>(36, 35, 1);
}
template<typename>
void test_cuda_full_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int size = 13;
int num_elem = size*size;
float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half));
Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half));
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
d_float1, size, size);
Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
d_float2, size, size);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_half(
d_res_half);
Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_float(
d_res_float);
gpu_float1.device(gpu_device) = gpu_float1.random();
gpu_float2.device(gpu_device) = gpu_float2.random();
gpu_res_float.device(gpu_device) = gpu_float1.sum().cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum();
Tensor<Eigen::half, 0> half_prec;
Tensor<Eigen::half, 0> full_prec;
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half));
gpu_device.synchronize();
VERIFY_IS_APPROX(full_prec(), half_prec());
gpu_res_float.device(gpu_device) = gpu_float1.maximum().cast<Eigen::half>();
gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().maximum();
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half));
gpu_device.synchronize();
VERIFY_IS_APPROX(full_prec(), half_prec());
gpu_device.deallocate(d_float1);
gpu_device.deallocate(d_float2);
gpu_device.deallocate(d_res_half);
gpu_device.deallocate(d_res_float);
}
template<typename>
void test_cuda_forced_evals() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_half2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
d_float, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half1(
d_res_half1, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Unaligned> gpu_res_half2(
d_res_half2, num_elem);
Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
d_res_float, num_elem);
Eigen::array<int, 1> no_bcast;
no_bcast[0] = 1;
gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
gpu_res_float.device(gpu_device) = gpu_float.abs();
gpu_res_half1.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().eval().cast<float>();
gpu_res_half2.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().broadcast(no_bcast).eval().cast<float>();
Tensor<float, 1> half_prec1(num_elem);
Tensor<float, 1> half_prec2(num_elem);
Tensor<float, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res_half1, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res_half2, num_elem*sizeof(float));
gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking forced eval " << i << full_prec(i) << " vs " << half_prec1(i) << " vs " << half_prec2(i) << std::endl;
VERIFY_IS_APPROX(full_prec(i), half_prec1(i));
VERIFY_IS_APPROX(full_prec(i), half_prec2(i));
}
gpu_device.deallocate(d_float);
gpu_device.deallocate(d_res_half1);
gpu_device.deallocate(d_res_half2);
gpu_device.deallocate(d_res_float);
}
#endif
void test_cxx11_tensor_of_float16_cuda()
{
CALL_SUBTEST_1(test_cuda_numext<void>());
#ifdef EIGEN_HAS_CUDA_FP16
CALL_SUBTEST_1(test_cuda_conversion<void>());
CALL_SUBTEST_1(test_cuda_unary<void>());
CALL_SUBTEST_1(test_cuda_elementwise<void>());
CALL_SUBTEST_1(test_cuda_trancendental<void>());
CALL_SUBTEST_2(test_cuda_contractions<void>());
CALL_SUBTEST_3(test_cuda_reductions<void>());
CALL_SUBTEST_4(test_cuda_full_reductions<void>());
CALL_SUBTEST_5(test_cuda_forced_evals<void>());
#else
std::cout << "Half floats are not supported by this version of cuda: skipping the test" << std::endl;
#endif
}
|
c66a17e6698ac1171893fcca975698607eb91c77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sphere.h"
#include "rts/math/legendre.h"
__global__ void gpuScalarUsp(bsComplex* Usp, bsComplex* h, bsComplex* B, int Nl, int rR, int thetaR)
{
//get the current coordinate in the plane slice
int ir = blockIdx.x * blockDim.x + threadIdx.x;
int itheta = blockIdx.y * blockDim.y + threadIdx.y;
//make sure that the thread indices are in-bounds
if(itheta >= thetaR || ir >= rR) return;
int i = itheta * rR + ir;
//ptype dr = (rmax - a) / (rR - 1);
ptype dtheta = (PI) / (thetaR - 1);
//compute the current angle and distance
//ptype r = dr * ir + a;
ptype theta = dtheta * itheta;
ptype cos_theta = cos(theta);
//initialize the Legendre polynomial
ptype P[2];
rts::init_legendre<ptype>(cos_theta, P[0], P[1]);
//initialize the result
bsComplex Us((ptype)0, (ptype)0);
//for each order l
for(int l=0; l <= Nl; l++)
{
if(l == 0)
{
Us += B[l] * h[ir * (Nl+1) + l] * P[0];
//Us += P[0];
}
else
{
if(l > 1)
{
rts::shift_legendre<ptype>(l, cos_theta, P[0], P[1]);
}
Us += B[l] * h[ir * (Nl+1) + l] * P[1];
//Us += P[1];
}
}
Usp[i] = Us;
//Usp[i] = h[ir * (Nl+1)];
//Usp[i] = ir;
}
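//gpuScalarUip evaluates the companion series Ui = sum_l A[l] * j_l * P_l(cos theta), using the
//per-radius spherical Bessel lookup table j and the coefficients A in place of B and h.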
__global__ void gpuScalarUip(bsComplex* Uip, bsComplex* j, bsComplex* A, int Nl, int aR, int thetaR)
{
//get the current coordinate in the plane slice
int ia = blockIdx.x * blockDim.x + threadIdx.x;
int itheta = blockIdx.y * blockDim.y + threadIdx.y;
//make sure that the thread indices are in-bounds
if(itheta >= thetaR || ia >= aR) return;
int i = itheta * aR + ia;
ptype dtheta = (PI) / (thetaR - 1);
//compute the current angle and distance
ptype theta = dtheta * itheta;
ptype cos_theta = cos(theta);
//initialize the Legendre polynomial
ptype P[2];
rts::init_legendre<ptype>(cos_theta, P[0], P[1]);
//initialize the result
bsComplex Ui((ptype)0, (ptype)0);
//for each order l
for(int l=0; l <= Nl; l++)
{
if(l == 0)
{
Ui += A[l] * j[ia * (Nl+1) + l] * P[0];
}
else
{
if(l > 1)
{
rts::shift_legendre<ptype>(l, cos_theta, P[0], P[1]);
}
Ui += A[l] * j[ia * (Nl+1) + l] * P[1];
}
}
Uip[i] = Ui;
}
void sphere::scalarUsp(bsComplex* h, int rR, int thetaR)
{
//copy the hankel function to the GPU
bsComplex* gpu_h;
HANDLE_ERROR( hipMalloc( (void**)&gpu_h, sizeof(bsComplex) * (Nl + 1) * rR ) );
HANDLE_ERROR( hipMemcpy( gpu_h, h, sizeof(bsComplex) * (Nl + 1) * rR, hipMemcpyHostToDevice ) );
//allocate memory for the scattering coefficients
bsComplex* gpuB;
HANDLE_ERROR(hipMalloc((void**) &gpuB, (Nl+1) * sizeof(bsComplex)));
//copy the scattering coefficients to the GPU
HANDLE_ERROR(hipMemcpy(gpuB, &B[0], (Nl+1) * sizeof(bsComplex), hipMemcpyHostToDevice));
//create one thread for each pixel of the field slice
dim3 dimBlock(SQRT_BLOCK, SQRT_BLOCK);
dim3 dimGrid((Usp.R[0] + SQRT_BLOCK -1)/SQRT_BLOCK, (Usp.R[1] + SQRT_BLOCK - 1)/SQRT_BLOCK);
hipLaunchKernelGGL(( gpuScalarUsp), dim3(dimGrid), dim3(dimBlock), 0, 0, Usp.x_hat, gpu_h, gpuB, Nl, rR, thetaR);
//free memory
hipFree(gpu_h);
hipFree(gpuB);
}
void sphere::scalarUip(bsComplex* j, int rR, int thetaR)
{
//copy the bessel and hankel LUTs to the GPU
bsComplex* gpu_j;
HANDLE_ERROR( hipMalloc( (void**)&gpu_j, sizeof(bsComplex) * (Nl + 1) * rR ) );
HANDLE_ERROR( hipMemcpy( gpu_j, j, sizeof(bsComplex) * (Nl + 1) * rR, hipMemcpyHostToDevice ) );
//allocate memory for the scattering coefficients
bsComplex* gpuA;
HANDLE_ERROR(hipMalloc((void**) &gpuA, (Nl+1) * sizeof(bsComplex)));
//copy the scattering coefficients to the GPU
HANDLE_ERROR(hipMemcpy(gpuA, &A[0], (Nl+1) * sizeof(bsComplex), hipMemcpyHostToDevice));
//create one thread for each pixel of the field slice
dim3 dimBlock(SQRT_BLOCK, SQRT_BLOCK);
dim3 dimGrid((Uip.R[0] + SQRT_BLOCK -1)/SQRT_BLOCK, (Uip.R[1] + SQRT_BLOCK - 1)/SQRT_BLOCK);
hipLaunchKernelGGL(( gpuScalarUip), dim3(dimGrid), dim3(dimBlock), 0, 0, Uip.x_hat, gpu_j, gpuA, Nl, rR, thetaR);
//free memory
hipFree(gpu_j);
hipFree(gpuA);
}
| c66a17e6698ac1171893fcca975698607eb91c77.cu | #include "sphere.h"
#include "rts/math/legendre.h"
__global__ void gpuScalarUsp(bsComplex* Usp, bsComplex* h, bsComplex* B, int Nl, int rR, int thetaR)
{
//get the current coordinate in the plane slice
int ir = blockIdx.x * blockDim.x + threadIdx.x;
int itheta = blockIdx.y * blockDim.y + threadIdx.y;
//make sure that the thread indices are in-bounds
if(itheta >= thetaR || ir >= rR) return;
int i = itheta * rR + ir;
//ptype dr = (rmax - a) / (rR - 1);
ptype dtheta = (PI) / (thetaR - 1);
//compute the current angle and distance
//ptype r = dr * ir + a;
ptype theta = dtheta * itheta;
ptype cos_theta = cos(theta);
//initialize the Legendre polynomial
ptype P[2];
rts::init_legendre<ptype>(cos_theta, P[0], P[1]);
//initialize the result
bsComplex Us((ptype)0, (ptype)0);
//for each order l
for(int l=0; l <= Nl; l++)
{
if(l == 0)
{
Us += B[l] * h[ir * (Nl+1) + l] * P[0];
//Us += P[0];
}
else
{
if(l > 1)
{
rts::shift_legendre<ptype>(l, cos_theta, P[0], P[1]);
}
Us += B[l] * h[ir * (Nl+1) + l] * P[1];
//Us += P[1];
}
}
Usp[i] = Us;
//Usp[i] = h[ir * (Nl+1)];
//Usp[i] = ir;
}
__global__ void gpuScalarUip(bsComplex* Uip, bsComplex* j, bsComplex* A, int Nl, int aR, int thetaR)
{
//get the current coordinate in the plane slice
int ia = blockIdx.x * blockDim.x + threadIdx.x;
int itheta = blockIdx.y * blockDim.y + threadIdx.y;
//make sure that the thread indices are in-bounds
if(itheta >= thetaR || ia >= aR) return;
int i = itheta * aR + ia;
ptype dtheta = (PI) / (thetaR - 1);
//compute the current angle and distance
ptype theta = dtheta * itheta;
ptype cos_theta = cos(theta);
//initialize the Legendre polynomial
ptype P[2];
rts::init_legendre<ptype>(cos_theta, P[0], P[1]);
//initialize the result
bsComplex Ui((ptype)0, (ptype)0);
//for each order l
for(int l=0; l <= Nl; l++)
{
if(l == 0)
{
Ui += A[l] * j[ia * (Nl+1) + l] * P[0];
}
else
{
if(l > 1)
{
rts::shift_legendre<ptype>(l, cos_theta, P[0], P[1]);
}
Ui += A[l] * j[ia * (Nl+1) + l] * P[1];
}
}
Uip[i] = Ui;
}
void sphere::scalarUsp(bsComplex* h, int rR, int thetaR)
{
//copy the hankel function to the GPU
bsComplex* gpu_h;
HANDLE_ERROR( cudaMalloc( (void**)&gpu_h, sizeof(bsComplex) * (Nl + 1) * rR ) );
HANDLE_ERROR( cudaMemcpy( gpu_h, h, sizeof(bsComplex) * (Nl + 1) * rR, cudaMemcpyHostToDevice ) );
//allocate memory for the scattering coefficients
bsComplex* gpuB;
HANDLE_ERROR(cudaMalloc((void**) &gpuB, (Nl+1) * sizeof(bsComplex)));
//copy the scattering coefficients to the GPU
HANDLE_ERROR(cudaMemcpy(gpuB, &B[0], (Nl+1) * sizeof(bsComplex), cudaMemcpyHostToDevice));
//create one thread for each pixel of the field slice
dim3 dimBlock(SQRT_BLOCK, SQRT_BLOCK);
dim3 dimGrid((Usp.R[0] + SQRT_BLOCK -1)/SQRT_BLOCK, (Usp.R[1] + SQRT_BLOCK - 1)/SQRT_BLOCK);
gpuScalarUsp<<<dimGrid, dimBlock>>>(Usp.x_hat, gpu_h, gpuB, Nl, rR, thetaR);
//free memory
cudaFree(gpu_h);
cudaFree(gpuB);
}
void sphere::scalarUip(bsComplex* j, int rR, int thetaR)
{
//copy the bessel and hankel LUTs to the GPU
bsComplex* gpu_j;
HANDLE_ERROR( cudaMalloc( (void**)&gpu_j, sizeof(bsComplex) * (Nl + 1) * rR ) );
HANDLE_ERROR( cudaMemcpy( gpu_j, j, sizeof(bsComplex) * (Nl + 1) * rR, cudaMemcpyHostToDevice ) );
//allocate memory for the scattering coefficients
bsComplex* gpuA;
HANDLE_ERROR(cudaMalloc((void**) &gpuA, (Nl+1) * sizeof(bsComplex)));
//copy the scattering coefficients to the GPU
HANDLE_ERROR(cudaMemcpy(gpuA, &A[0], (Nl+1) * sizeof(bsComplex), cudaMemcpyHostToDevice));
//create one thread for each pixel of the field slice
dim3 dimBlock(SQRT_BLOCK, SQRT_BLOCK);
dim3 dimGrid((Uip.R[0] + SQRT_BLOCK -1)/SQRT_BLOCK, (Uip.R[1] + SQRT_BLOCK - 1)/SQRT_BLOCK);
gpuScalarUip<<<dimGrid, dimBlock>>>(Uip.x_hat, gpu_j, gpuA, Nl, rR, thetaR);
//free memory
cudaFree(gpu_j);
cudaFree(gpuA);
}
|
c44be1cbc04a0d6d341316f9d5ff1679c6517147.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffe2/core/context_gpu.h>
#include "caffe2/operator/back_mean_op.h"
namespace caffe2 {
int back_mean_strip(std::vector<int64_t>& dims, int count) {
auto size = 1;
while (count--) {
size *= dims.back();
dims.pop_back();
}
return size;
}
namespace {
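// Each output element Y[i] is the mean of the C consecutive inputs X[i*C] .. X[i*C + C - 1];
// D carries the divisor (the number of stripped trailing elements) as a float.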
__global__ void BackMeanKernel(const int N, const int C, const float D,
const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
float sum = 0;
for (int j = i * C, e = j + C; j != e; j++) {
sum += X[j];
}
Y[i] = sum / D;
}
}
} // namespace
template <>
bool BackMeanOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto dims = X.sizes().vec();
auto size = back_mean_strip(dims, count_);
Y->Resize(dims);
if (Y->size() > 0) {
hipLaunchKernelGGL(( BackMeanKernel), dim3(CAFFE_GET_BLOCKS(Y->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(), Y->size(), size, (float)size,
X.data<float>(),
Y->mutable_data<float>());
}
return true;
}
namespace {
__global__ void BackMeanGradientKernel(const int N, const int C, const float D,
const float* dY, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i / C] / D; }
}
} // namespace
template <>
bool BackMeanGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
auto dims = X.sizes().vec();
auto size = back_mean_strip(dims, count_);
DCHECK_EQ(dY.size() * size, dX->size());
if (dY.size() > 0) {
hipLaunchKernelGGL(( BackMeanGradientKernel), dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
dX->size(), size, (float)size, dY.data<float>(),
dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(BackMean, BackMeanOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(BackMeanGradient,
BackMeanGradientOp<float, CUDAContext>);
} // namespace caffe2
| c44be1cbc04a0d6d341316f9d5ff1679c6517147.cu | #include <caffe2/core/context_gpu.h>
#include "caffe2/operator/back_mean_op.h"
namespace caffe2 {
int back_mean_strip(std::vector<int64_t>& dims, int count) {
auto size = 1;
while (count--) {
size *= dims.back();
dims.pop_back();
}
return size;
}
namespace {
__global__ void BackMeanKernel(const int N, const int C, const float D,
const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
float sum = 0;
for (int j = i * C, e = j + C; j != e; j++) {
sum += X[j];
}
Y[i] = sum / D;
}
}
} // namespace
template <>
bool BackMeanOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto dims = X.sizes().vec();
auto size = back_mean_strip(dims, count_);
Y->Resize(dims);
if (Y->size() > 0) {
BackMeanKernel<<<CAFFE_GET_BLOCKS(Y->size()), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(Y->size(), size, (float)size,
X.data<float>(),
Y->mutable_data<float>());
}
return true;
}
namespace {
__global__ void BackMeanGradientKernel(const int N, const int C, const float D,
const float* dY, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i / C] / D; }
}
} // namespace
template <>
bool BackMeanGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
auto dims = X.sizes().vec();
auto size = back_mean_strip(dims, count_);
DCHECK_EQ(dY.size() * size, dX->size());
if (dY.size() > 0) {
BackMeanGradientKernel<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
dX->size(), size, (float)size, dY.data<float>(),
dX->mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(BackMean, BackMeanOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(BackMeanGradient,
BackMeanGradientOp<float, CUDAContext>);
} // namespace caffe2
|
4c96ee7cb5301177469fbf30a0503996cd5e6d13.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "common.h"
// Like printf, but red. Limited to 1000 characters.
void red_printf(const char *format, ...)
{
#define RED_LIM 1000
va_list args;
int i;
char buf1[RED_LIM], buf2[RED_LIM];
memset(buf1, 0, RED_LIM);
memset(buf2, 0, RED_LIM);
va_start(args, format);
// Marshal the stuff to print in a buffer
vsnprintf(buf1, RED_LIM, format, args);
// Probably a bad check for buffer overflow
for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) {
assert(buf1[i] == 0);
}
// Add markers for red color and reset color
snprintf(buf2, 1000, "\033[31m%s\033[0m", buf1);
// Probably another bad check for buffer overflow
for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) {
assert(buf2[i] == 0);
}
printf("%s", buf2);
va_end(args);
}
void printDeviceProperties()
{
struct hipDeviceProp_t deviceProp;
int ret = hipGetDeviceProperties(&deviceProp, 0);
CPE(ret != hipSuccess, "Get Device Properties failed\n");
printf("\n=================DEVICE PROPERTIES=================\n");
printf("\tDevice name: %s\n", deviceProp.name);
printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem);
printf("\tWarp size: %d\n", deviceProp.warpSize);
printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount);
printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf("\n");
}
// Returns when all N elements in A are non-zero
void waitForNonZero(volatile int *A, int N)
{
int i, turns = 0;
while(1) {
int allNonZero = 1;
for(i = 0; i < N; i ++) {
if(A[i] == 0) {
allNonZero = 0;
}
}
if(allNonZero) {
return;
}
turns ++;
if(turns > 1000000) {
printf("Waiting for non-zero ...\n");
turns = 0;
}
}
}
/** < Useful for sorting an array of doubles */
int cmpfunc (const void *a, const void *b)
{
double a_d = *(double *) a;
double b_d = *(double *) b;
if(a_d > b_d) {
return 1;
} else if(a_d < b_d) {
return -1;
} else {
return 0;
}
}
double get_timespec_us(struct timespec start, struct timespec end)
{
double ret =
(double) (end.tv_nsec - start.tv_nsec) / 1000 +
(end.tv_sec - start.tv_sec) * 1000000;
return ret;
}
| 4c96ee7cb5301177469fbf30a0503996cd5e6d13.cu | #include <cuda_runtime.h>
#include "common.h"
// Like printf, but red. Limited to 1000 characters.
void red_printf(const char *format, ...)
{
#define RED_LIM 1000
va_list args;
int i;
char buf1[RED_LIM], buf2[RED_LIM];
memset(buf1, 0, RED_LIM);
memset(buf2, 0, RED_LIM);
va_start(args, format);
// Marshal the stuff to print in a buffer
vsnprintf(buf1, RED_LIM, format, args);
// Probably a bad check for buffer overflow
for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) {
assert(buf1[i] == 0);
}
// Add markers for red color and reset color
snprintf(buf2, 1000, "\033[31m%s\033[0m", buf1);
// Probably another bad check for buffer overflow
for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) {
assert(buf2[i] == 0);
}
printf("%s", buf2);
va_end(args);
}
void printDeviceProperties()
{
struct cudaDeviceProp deviceProp;
int ret = cudaGetDeviceProperties(&deviceProp, 0);
CPE(ret != cudaSuccess, "Get Device Properties failed\n");
printf("\n=================DEVICE PROPERTIES=================\n");
printf("\tDevice name: %s\n", deviceProp.name);
printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem);
printf("\tWarp size: %d\n", deviceProp.warpSize);
printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount);
printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf("\n");
}
// Returns when all N elements in A are non-zero
void waitForNonZero(volatile int *A, int N)
{
int i, turns = 0;
while(1) {
int allNonZero = 1;
for(i = 0; i < N; i ++) {
if(A[i] == 0) {
allNonZero = 0;
}
}
if(allNonZero) {
return;
}
turns ++;
if(turns > 1000000) {
printf("Waiting for non-zero ...\n");
turns = 0;
}
}
}
/** < Useful for sorting an array of doubles */
int cmpfunc (const void *a, const void *b)
{
double a_d = *(double *) a;
double b_d = *(double *) b;
if(a_d > b_d) {
return 1;
} else if(a_d < b_d) {
return -1;
} else {
return 0;
}
}
double get_timespec_us(struct timespec start, struct timespec end)
{
double ret =
(double) (end.tv_nsec - start.tv_nsec) / 1000 +
(end.tv_sec - start.tv_sec) * 1000000;
return ret;
}
|
4bceef1d4e859d2fe3cd58823a0ec6fedce108bf.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 5
#define MAX_ERR 1e-6
__global__ void vector_add(float *out, float *a, float *b, int n) {
for (int i = 0; i < n; i++) {
out[i] = a[i] + b[i];
}
}
int main() {
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// Allocate host memory
a = (float *)malloc(sizeof(float) * N);
b = (float *)malloc(sizeof(float) * N);
out = (float *)malloc(sizeof(float) * N);
// Initialize host arrays
for (int i = 0; i < N; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
// Allocate device memory
hipMalloc((void **)&d_a, sizeof(float) * N);
hipMalloc((void **)&d_b, sizeof(float) * N);
hipMalloc((void **)&d_out, sizeof(float) * N);
// Transfer data from host to device memory
hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
// Executing kernel
hipLaunchKernelGGL(( vector_add), dim3(1), dim3(1), 0, 0, d_out, d_a, d_b, N);
// Transfer data back to host memory
hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost);
// hipMemcpy(out, d_a, sizeof(float) * N, hipMemcpyDeviceToHost);
// output
for (int i = 0; i < N; i++) {
printf("%.6f\n", *a);
}
for (int i = 0; i < N; i++) {
printf("%.6f\n", *b);
}
for (int i = 0; i < N; i++) {
printf("%.6f\n", *out);
}
// Verification
// for (int i = 0; i < N; i++) {
// assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
// }
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
// Deallocate device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
return 0;
} | 4bceef1d4e859d2fe3cd58823a0ec6fedce108bf.cu | #include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 5
#define MAX_ERR 1e-6
__global__ void vector_add(float *out, float *a, float *b, int n) {
for (int i = 0; i < n; i++) {
out[i] = a[i] + b[i];
}
}
int main() {
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// Allocate host memory
a = (float *)malloc(sizeof(float) * N);
b = (float *)malloc(sizeof(float) * N);
out = (float *)malloc(sizeof(float) * N);
// Initialize host arrays
for (int i = 0; i < N; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
// Allocate device memory
cudaMalloc((void **)&d_a, sizeof(float) * N);
cudaMalloc((void **)&d_b, sizeof(float) * N);
cudaMalloc((void **)&d_out, sizeof(float) * N);
// Transfer data from host to device memory
cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
// Executing kernel
vector_add<<<1, 1>>>(d_out, d_a, d_b, N);
// Transfer data back to host memory
cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
// cudaMemcpy(out, d_a, sizeof(float) * N, cudaMemcpyDeviceToHost);
// output
for (int i = 0; i < N; i++) {
printf("%.6f\n", *a);
}
for (int i = 0; i < N; i++) {
printf("%.6f\n", *b);
}
for (int i = 0; i < N; i++) {
printf("%.6f\n", *out);
}
// Verification
// for (int i = 0; i < N; i++) {
// assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
// }
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
// Deallocate device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
return 0;
} |
6623a0122c6f7a0a7b320cac51bedeaf01a213a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by marco on 05/09/19.
//
#include "cudaCanny.h"
#include <omp.h>
#include <vector>
#include <iostream>
#include <fstream>
#define HIGH_THRESHOLD 140
#define LOW_THRESHOLD 70
#define GRIDVAL 16
cudaCanny::cudaCanny(cv::Mat inImage, const char *imgName, int size, double sigma) {
inputImage = inImage;
inputImageFileName = imgName;
generateFilter(size, sigma); // create filter
}
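// Convolves the source image with the Gaussian filter; only fully covered ("valid") pixels are
// produced, so the output shrinks by twice the filter half-width in each dimension.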
__global__ void _canny_apply_filter_(unsigned char* d_src, double* d_filter, unsigned char* d_dst, int filterSize, int filteredSize, int inputSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= 0 && y >= 0 && x < filteredSize && y < filteredSize) {
double sum = 0;
for (int i = 0; i < filterSize; i++)
for (int j = 0; j < filterSize; j++) {
sum += d_filter[i * filterSize + j] * (double) (d_src[(y + i) * inputSize + (x + j)]);
}
d_dst[x + y * filteredSize] = sum;
}
}
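// Applies the 3x3 Sobel operator to the smoothed image: the clamped gradient magnitude goes to
// d_dst and the gradient direction (atan(dy/dx), or 90 when dx == 0) goes to d_angleMap.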
__global__ void _canny_angle_map_(unsigned char* d_src, unsigned char* d_dst, unsigned char* d_angleMap, int outputSize, int inputSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
double sq;
if (x >= 0 && y >= 0 && x < outputSize && y < outputSize) {
float dx, dy;
dx = (-1 * d_src[y * inputSize + x]) + (-2 * d_src[(y + 1) * inputSize + x]) + (-1 * d_src[(y + 2) * inputSize + x]) +
(d_src[y * inputSize + (x + 2)]) + (2 * d_src[(y + 1) * inputSize + (x + 2)]) + (d_src[(y + 2) * inputSize + (x + 2)]);
dy = (d_src[y * inputSize + x]) + (2 * d_src[y * inputSize + (x + 1)]) + (d_src[y * inputSize + (x + 2)]) +
(-1 * d_src[(y + 2) * inputSize + x]) + (-2 * d_src[(y + 2) * inputSize + (x + 1)]) +
(-1 * d_src[(y + 2) * inputSize + (x + 2)]);
sq = sqrt(float((dx * dx) + (dy * dy)));
if (sq > 255) d_dst[y * outputSize + x] = 255;
else d_dst[y * outputSize + x] = sq;
if (dx == 0) d_angleMap[y * outputSize + x] = 90;
else d_angleMap[y * outputSize + x] = atan(dy / dx);
}
}
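// Non-maximum suppression: a pixel survives only if its gradient magnitude is a local maximum
// along the direction read from the angle map; otherwise it is zeroed, thinning edges to ~1 px.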
__global__ void _nonmax_suppression_(unsigned char* d_src, unsigned char* d_angleMap, unsigned char* d_dst, int outputSize, int inputSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > 0 && y > 0 && x < outputSize && y < outputSize) {
float tan = d_angleMap[y*inputSize + x]; // gradient direction from the angle map
d_dst[(y-1)*outputSize + x-1] = d_src[y*inputSize + x];
//Horizontal Edge
if (((-22.5 < tan) && (tan <= 22.5)) || ((157.5 < tan) && (tan <= -157.5))) {
if ((d_src[y*inputSize + x] < d_src[(y+1)*inputSize + x]) ||
(d_src[y*inputSize + x] < d_src[(y-1)*inputSize + x]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
//Vertical Edge
if (((-112.5 < tan) && (tan <= -67.5)) || ((67.5 < tan) && (tan <= 112.5))) {
if ((d_src[y*inputSize + x] < d_src[y*inputSize + x+1]) ||
(d_src[y*inputSize + x] < d_src[y*inputSize + x-1]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
//-45 Degree Edge
if (((-67.5 < tan) && (tan <= -22.5)) || ((112.5 < tan) && (tan <= 157.5))) {
if ((d_src[y*inputSize + x] < d_src[(y+1)*inputSize + x-1]) ||
(d_src[y*inputSize + x] < d_src[(y-1)*inputSize + x+1]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
//45 Degree Edge
if (((-157.5 < tan) && (tan <= -112.5)) || ((22.5 < tan) && (tan <= 67.5))) {
if ((d_src[y*inputSize + x] < d_src[(y+1)*inputSize + x+1]) ||
(d_src[y*inputSize + x] < d_src[(y-1)*inputSize + x-1]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
}
}
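// Double-threshold hysteresis: values above the high threshold become strong edges (255), values
// between the two thresholds are kept only when an 8-connected neighbour is strong, and values
// below the low threshold are suppressed.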
__global__ void _edges_concatenation_(unsigned char* d_src, unsigned char* d_dst, int highthr, int lowthr, int size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > 0 && y > 0 && x < size-1 && y < size-1) {
int pixelVal = d_src[y * size + x];
if (pixelVal > highthr) {
// strong edge
d_dst[y * size + x] = 255;
} else if (pixelVal <= highthr && pixelVal >= lowthr) {
// is connected to a strong edge?
// check if region is feasible ( 8-bit neighbours)
// check neighbours
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
// region is feasible
int pVal = d_src[(y+j) * size + i+x];
if (pVal >= highthr) {
d_dst[y * size + x] = 255; // connected to a strong edge
break;
} else if (pVal < lowthr) {
d_dst[y * size + x] = 0;
break;
}
}
}
} else if (pixelVal < lowthr) {
d_dst[y * size + x] = 0; // suppression
}
}
else if (x < size && y < size) d_dst[y * size + x] = 0; // zero the border, guarding against threads past the image edge
}
/***
* creates a gaussian filter of the given size with the specified sigma
* @param size
* @param sigma
* @return returns the gaussian filter
*/
void cudaCanny::generateFilter(int size, double sigma) {
std::vector<double> filter(size*size); // output filter (size*size)
double r, s = 2.0 * sigma * sigma;
double sum = 0; // for filter normalization
// fill the filter
for (int y = 0 ; y < size; y++) {
for (int x = 0; x < size; x++) {
r = sqrt((y-size/2)*(y-size/2) + (x-size/2)*(x-size/2) );
filter[y*size + x] = (exp(-(r * r) / s)) / (M_PI * s);
sum += filter[y*size + x];
}
}
// normalize elements from 0 to 1
for (int i = 0; i < size*size; i++) {
filter[i] /= sum;
}
gaussianFilter = filter;
}
cv::Mat cudaCanny::computeCuda() {
// STEP 1: GAUSSIAN FILTER
int size = (int)sqrt(gaussianFilter.size())/2;
cv::Mat gaussianFiltered = cv::Mat(inputImage.rows - 2*size, inputImage.cols - 2*size, CV_8UC1, cv::Scalar(0)); // creates an empty output image
unsigned char* d_src = nullptr;
unsigned char* d_dst = nullptr;
const size_t ARRAY_BYTES = inputImage.cols * inputImage.rows * sizeof(unsigned char);
double* d_filter = &gaussianFilter[0];
hipMalloc((void**) &d_src, ARRAY_BYTES);
hipMalloc((void**) &d_filter, gaussianFilter.size()*sizeof(double));
hipMalloc((void**) &d_dst, (inputImage.cols - 2*size) * (inputImage.rows - 2*size) * sizeof(unsigned char));
hipMemcpy(d_src, inputImage.data, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_filter, gaussianFilter.data(), gaussianFilter.size()*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_dst, gaussianFiltered.data, (inputImage.cols - 2*size) * (inputImage.rows - 2*size) * sizeof(unsigned char), hipMemcpyHostToDevice);
dim3 threads(GRIDVAL,GRIDVAL);
dim3 blocks((inputImage.cols - 2*size) / GRIDVAL + 1, (inputImage.rows - 2*size) / GRIDVAL + 1);
hipLaunchKernelGGL(( _canny_apply_filter_), dim3(blocks), dim3(threads), 0, 0, d_src, d_filter, d_dst, sqrt(gaussianFilter.size()), inputImage.cols - 2*size, inputImage.cols);
// waits until is done
hipDeviceSynchronize();
hipMemcpy(gaussianFiltered.data, d_dst, (inputImage.cols - 2*size) * (inputImage.rows - 2*size) * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(d_src);
hipFree(d_filter);
hipFree(d_dst);
// STEP 2: SOBEL FILTER
cv::Mat sobelFiltered = cv::Mat(gaussianFiltered.rows - 2, gaussianFiltered.cols - 2, CV_8UC1);
anglesMap = cv::Mat(gaussianFiltered.rows - 2, gaussianFiltered.cols - 2, CV_32FC1); //AngleMap
unsigned char* d_src2 = nullptr;
unsigned char* d_dst2 = nullptr;
unsigned char* d_angleMap = nullptr;
hipMalloc((void**) &d_src2, (gaussianFiltered.cols) * (gaussianFiltered.rows) * sizeof(unsigned char));
hipMalloc((void**) &d_dst2, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char));
hipMalloc((void**) &d_angleMap, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char));
hipMemcpy(d_src2, gaussianFiltered.data, (gaussianFiltered.cols) * (gaussianFiltered.rows) * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(d_dst2, sobelFiltered.data, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(d_angleMap, anglesMap.data, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), hipMemcpyHostToDevice);
dim3 blocks2((gaussianFiltered.cols - 2) / GRIDVAL + 1, (gaussianFiltered.rows - 2) / GRIDVAL + 1);
hipLaunchKernelGGL(( _canny_angle_map_), dim3(blocks2), dim3(threads), 0, 0, d_src2, d_dst2, d_angleMap, gaussianFiltered.cols - 2, gaussianFiltered.cols);
// waits until is done
hipDeviceSynchronize();
hipMemcpy(sobelFiltered.data, d_dst2, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(anglesMap.data, d_angleMap, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(d_src2);
hipFree(d_dst2);
hipFree(d_angleMap);
// STEP 3: NON-MAXIMUM SUPPRESSION
cv::Mat nonMaxSuppressed = cv::Mat(sobelFiltered.rows-2, sobelFiltered.cols-2, CV_8UC1);
unsigned char* d_src3 = nullptr;
unsigned char* d_angleMap2 = nullptr;
unsigned char* d_dst3 = nullptr;
hipMalloc((void**) &d_src3, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char));
hipMalloc((void**) &d_angleMap2, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char));
hipMalloc((void**) &d_dst3, (sobelFiltered.cols-2) * (sobelFiltered.rows-2) * sizeof(unsigned char));
hipMemcpy(d_src3, sobelFiltered.data, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(d_angleMap2, sobelFiltered.data, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(d_dst3, nonMaxSuppressed.data, (sobelFiltered.cols-2) * (sobelFiltered.rows-2) * sizeof(unsigned char), hipMemcpyHostToDevice);
dim3 blocks3((sobelFiltered.cols - 2) / GRIDVAL + 1, (sobelFiltered.rows - 2) / GRIDVAL + 1);
hipLaunchKernelGGL(( _nonmax_suppression_), dim3(blocks3), dim3(threads), 0, 0, d_src3, d_angleMap2, d_dst3, sobelFiltered.rows-2, sobelFiltered.rows);
// waits until is done
hipDeviceSynchronize();
hipMemcpy(nonMaxSuppressed.data, d_dst3, (sobelFiltered.cols-2) * (sobelFiltered.rows-2) * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(d_src3);
hipFree(d_angleMap2);
hipFree(d_dst3);
// STEP 4: STRONG EDGES CONCATENATION
cv::Mat outputImage = cv::Mat(nonMaxSuppressed.rows, nonMaxSuppressed.cols, CV_8UC1);
unsigned char* d_src4 = nullptr;
unsigned char* d_dst4 = nullptr;
hipMalloc((void**) &d_src4, nonMaxSuppressed.rows*nonMaxSuppressed.cols*sizeof(unsigned char));
hipMalloc((void**) &d_dst4, nonMaxSuppressed.rows*nonMaxSuppressed.cols*sizeof(unsigned char));
hipMemcpy(d_src4, nonMaxSuppressed.data, nonMaxSuppressed.rows * nonMaxSuppressed.cols * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(d_dst4, outputImage.data, nonMaxSuppressed.rows * nonMaxSuppressed.cols * sizeof(unsigned char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( _edges_concatenation_), dim3(blocks3), dim3(threads), 0, 0, d_src4, d_dst4, HIGH_THRESHOLD, LOW_THRESHOLD, nonMaxSuppressed.cols);
// waits until is done
hipDeviceSynchronize();
hipMemcpy(outputImage.data, d_dst4, nonMaxSuppressed.rows * nonMaxSuppressed.cols * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(d_src4);
hipFree(d_dst4);
return outputImage;
}
| 6623a0122c6f7a0a7b320cac51bedeaf01a213a3.cu | //
// Created by marco on 05/09/19.
//
#include "cudaCanny.h"
#include <omp.h>
#include <vector>
#include <iostream>
#include <fstream>
#define HIGH_THRESHOLD 140
#define LOW_THRESHOLD 70
#define GRIDVAL 16
cudaCanny::cudaCanny(cv::Mat inImage, const char *imgName, int size, double sigma) {
inputImage = inImage;
inputImageFileName = imgName;
generateFilter(size, sigma); // create filter
}
__global__ void _canny_apply_filter_(unsigned char* d_src, double* d_filter, unsigned char* d_dst, int filterSize, int filteredSize, int inputSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= 0 && y >= 0 && x < filteredSize && y < filteredSize) {
double sum = 0;
for (int i = 0; i < filterSize; i++)
for (int j = 0; j < filterSize; j++) {
sum += d_filter[i * filterSize + j] * (double) (d_src[(y + i) * inputSize + (x + j)]);
}
d_dst[x + y * filteredSize] = sum;
}
}
__global__ void _canny_angle_map_(unsigned char* d_src, unsigned char* d_dst, unsigned char* d_angleMap, int outputSize, int inputSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
double sq;
if (x >= 0 && y >= 0 && x < outputSize && y < outputSize) {
float dx, dy;
dx = (-1 * d_src[y * inputSize + x]) + (-2 * d_src[(y + 1) * inputSize + x]) + (-1 * d_src[(y + 2) * inputSize + x]) +
(d_src[y * inputSize + (x + 2)]) + (2 * d_src[(y + 1) * inputSize + (x + 2)]) + (d_src[(y + 2) * inputSize + (x + 2)]);
dy = (d_src[y * inputSize + x]) + (2 * d_src[y * inputSize + (x + 1)]) + (d_src[y * inputSize + (x + 2)]) +
(-1 * d_src[(y + 2) * inputSize + x]) + (-2 * d_src[(y + 2) * inputSize + (x + 1)]) +
(-1 * d_src[(y + 2) * inputSize + (x + 2)]);
sq = sqrt(float((dx * dx) + (dy * dy)));
if (sq > 255) d_dst[y * outputSize + x] = 255;
else d_dst[y * outputSize + x] = sq;
if (dx == 0) d_angleMap[y * outputSize + x] = 90;
else d_angleMap[y * outputSize + x] = atan(dy / dx);
}
}
__global__ void _nonmax_suppression_(unsigned char* d_src, unsigned char* d_angleMap, unsigned char* d_dst, int outputSize, int inputSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > 0 && y > 0 && x < outputSize && y < outputSize) {
float tan = d_angleMap[y*inputSize + x]; // gradient direction from the angle map
d_dst[(y-1)*outputSize + x-1] = d_src[y*inputSize + x];
//Horizontal Edge
if (((-22.5 < tan) && (tan <= 22.5)) || ((157.5 < tan) && (tan <= -157.5))) {
if ((d_src[y*inputSize + x] < d_src[(y+1)*inputSize + x]) ||
(d_src[y*inputSize + x] < d_src[(y-1)*inputSize + x]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
//Vertical Edge
if (((-112.5 < tan) && (tan <= -67.5)) || ((67.5 < tan) && (tan <= 112.5))) {
if ((d_src[y*inputSize + x] < d_src[y*inputSize + x+1]) ||
(d_src[y*inputSize + x] < d_src[y*inputSize + x-1]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
//-45 Degree Edge
if (((-67.5 < tan) && (tan <= -22.5)) || ((112.5 < tan) && (tan <= 157.5))) {
if ((d_src[y*inputSize + x] < d_src[(y+1)*inputSize + x-1]) ||
(d_src[y*inputSize + x] < d_src[(y-1)*inputSize + x+1]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
//45 Degree Edge
if (((-157.5 < tan) && (tan <= -112.5)) || ((22.5 < tan) && (tan <= 67.5))) {
if ((d_src[y*inputSize + x] < d_src[(y+1)*inputSize + x+1]) ||
(d_src[y*inputSize + x] < d_src[(y-1)*inputSize + x-1]))
d_dst[(y-1)*outputSize + x-1] = 0;
}
}
}
__global__ void _edges_concatenation_(unsigned char* d_src, unsigned char* d_dst, int highthr, int lowthr, int size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > 0 && y > 0 && x < size-1 && y < size-1) {
int pixelVal = d_src[y * size + x];
if (pixelVal > highthr) {
// strong edge
d_dst[y * size + x] = 255;
} else if (pixelVal <= highthr && pixelVal >= lowthr) {
// is connected to a strong edge?
// check if region is feasible ( 8-bit neighbours)
// check neighbours
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
// region is feasible
int pVal = d_src[(y+j) * size + i+x];
if (pVal >= highthr) {
d_dst[y * size + x] = 255; // connected to a strong edge
break;
} else if (pVal < lowthr) {
d_dst[y * size + x] = 0;
break;
}
}
}
} else if (pixelVal < lowthr) {
d_dst[y * size + x] = 0; // suppression
}
}
else if (x < size && y < size) d_dst[y * size + x] = 0; // zero the border, guarding against threads past the image edge
}
/***
* creates a gaussian filter of the given size with the specified sigma
* @param size
* @param sigma
* @return returns the gaussian filter
*/
void cudaCanny::generateFilter(int size, double sigma) {
std::vector<double> filter(size*size); // output filter (size*size)
double r, s = 2.0 * sigma * sigma;
double sum = 0; // for filter normalization
// fill the filter
for (int y = 0 ; y < size; y++) {
for (int x = 0; x < size; x++) {
r = sqrt((y-size/2)*(y-size/2) + (x-size/2)*(x-size/2) );
filter[y*size + x] = (exp(-(r * r) / s)) / (M_PI * s);
sum += filter[y*size + x];
}
}
// normalize elements from 0 to 1
for (int i = 0; i < size*size; i++) {
filter[i] /= sum;
}
gaussianFilter = filter;
}
cv::Mat cudaCanny::computeCuda() {
// STEP 1: GAUSSIAN FILTER
int size = (int)sqrt(gaussianFilter.size())/2;
cv::Mat gaussianFiltered = cv::Mat(inputImage.rows - 2*size, inputImage.cols - 2*size, CV_8UC1, cv::Scalar(0)); // creates an empty output image
unsigned char* d_src = nullptr;
unsigned char* d_dst = nullptr;
const size_t ARRAY_BYTES = inputImage.cols * inputImage.rows * sizeof(unsigned char);
double* d_filter = &gaussianFilter[0];
cudaMalloc((void**) &d_src, ARRAY_BYTES);
cudaMalloc((void**) &d_filter, gaussianFilter.size()*sizeof(double));
cudaMalloc((void**) &d_dst, (inputImage.cols - 2*size) * (inputImage.rows - 2*size) * sizeof(unsigned char));
cudaMemcpy(d_src, inputImage.data, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_filter, gaussianFilter.data(), gaussianFilter.size()*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_dst, gaussianFiltered.data, (inputImage.cols - 2*size) * (inputImage.rows - 2*size) * sizeof(unsigned char), cudaMemcpyHostToDevice);
dim3 threads(GRIDVAL,GRIDVAL);
dim3 blocks((inputImage.cols - 2*size) / GRIDVAL + 1, (inputImage.rows - 2*size) / GRIDVAL + 1);
_canny_apply_filter_<<<blocks, threads>>>(d_src, d_filter, d_dst, sqrt(gaussianFilter.size()), inputImage.cols - 2*size, inputImage.cols);
// waits until is done
cudaDeviceSynchronize();
cudaMemcpy(gaussianFiltered.data, d_dst, (inputImage.cols - 2*size) * (inputImage.rows - 2*size) * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(d_src);
cudaFree(d_filter);
cudaFree(d_dst);
// STEP 2: SOBEL FILTER
cv::Mat sobelFiltered = cv::Mat(gaussianFiltered.rows - 2, gaussianFiltered.cols - 2, CV_8UC1);
anglesMap = cv::Mat(gaussianFiltered.rows - 2, gaussianFiltered.cols - 2, CV_32FC1); //AngleMap
unsigned char* d_src2 = nullptr;
unsigned char* d_dst2 = nullptr;
unsigned char* d_angleMap = nullptr;
cudaMalloc((void**) &d_src2, (gaussianFiltered.cols) * (gaussianFiltered.rows) * sizeof(unsigned char));
cudaMalloc((void**) &d_dst2, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char));
cudaMalloc((void**) &d_angleMap, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char));
cudaMemcpy(d_src2, gaussianFiltered.data, (gaussianFiltered.cols) * (gaussianFiltered.rows) * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(d_dst2, sobelFiltered.data, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(d_angleMap, anglesMap.data, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), cudaMemcpyHostToDevice);
dim3 blocks2((gaussianFiltered.cols - 2) / GRIDVAL + 1, (gaussianFiltered.rows - 2) / GRIDVAL + 1);
_canny_angle_map_<<<blocks2, threads>>>(d_src2, d_dst2, d_angleMap, gaussianFiltered.cols - 2, gaussianFiltered.cols);
// waits until is done
cudaDeviceSynchronize();
cudaMemcpy(sobelFiltered.data, d_dst2, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(anglesMap.data, d_angleMap, (gaussianFiltered.cols - 2) * (gaussianFiltered.rows - 2) * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(d_src2);
cudaFree(d_dst2);
cudaFree(d_angleMap);
// STEP 3: NON-MAXIMUM SUPPRESSION
cv::Mat nonMaxSuppressed = cv::Mat(sobelFiltered.rows-2, sobelFiltered.cols-2, CV_8UC1);
unsigned char* d_src3 = nullptr;
unsigned char* d_angleMap2 = nullptr;
unsigned char* d_dst3 = nullptr;
cudaMalloc((void**) &d_src3, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char));
cudaMalloc((void**) &d_angleMap2, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char));
cudaMalloc((void**) &d_dst3, (sobelFiltered.cols-2) * (sobelFiltered.rows-2) * sizeof(unsigned char));
cudaMemcpy(d_src3, sobelFiltered.data, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(d_angleMap2, sobelFiltered.data, (sobelFiltered.cols) * (sobelFiltered.rows) * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(d_dst3, nonMaxSuppressed.data, (sobelFiltered.cols-2) * (sobelFiltered.rows-2) * sizeof(unsigned char), cudaMemcpyHostToDevice);
dim3 blocks3((sobelFiltered.cols - 2) / GRIDVAL + 1, (sobelFiltered.rows - 2) / GRIDVAL + 1);
_nonmax_suppression_<<<blocks3, threads>>>(d_src3, d_angleMap2, d_dst3, sobelFiltered.rows-2, sobelFiltered.rows);
// waits until is done
cudaDeviceSynchronize();
cudaMemcpy(nonMaxSuppressed.data, d_dst3, (sobelFiltered.cols-2) * (sobelFiltered.rows-2) * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(d_src3);
cudaFree(d_angleMap2);
cudaFree(d_dst3);
// STEP 4: STRONG EDGES CONCATENATION
cv::Mat outputImage = cv::Mat(nonMaxSuppressed.rows, nonMaxSuppressed.cols, CV_8UC1);
unsigned char* d_src4 = nullptr;
unsigned char* d_dst4 = nullptr;
cudaMalloc((void**) &d_src4, nonMaxSuppressed.rows*nonMaxSuppressed.cols*sizeof(unsigned char));
cudaMalloc((void**) &d_dst4, nonMaxSuppressed.rows*nonMaxSuppressed.cols*sizeof(unsigned char));
cudaMemcpy(d_src4, nonMaxSuppressed.data, nonMaxSuppressed.rows * nonMaxSuppressed.cols * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(d_dst4, outputImage.data, nonMaxSuppressed.rows * nonMaxSuppressed.cols * sizeof(unsigned char), cudaMemcpyHostToDevice);
_edges_concatenation_<<<blocks3, threads>>>(d_src4, d_dst4, HIGH_THRESHOLD, LOW_THRESHOLD, nonMaxSuppressed.cols);
// waits until is done
cudaDeviceSynchronize();
cudaMemcpy(outputImage.data, d_dst4, nonMaxSuppressed.rows * nonMaxSuppressed.cols * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(d_src4);
cudaFree(d_dst4);
return outputImage;
}
|
1370373bb31d8b48a4993247e84b9cc2aeacba38.hip | // !!! This is a file automatically generated by hipify!!!
#include "efficient.h"
#include <cassert>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer() {
static PerformanceTimer timer;
return timer;
}
// this many elements are processed by one block. cuda block size is half this
/*constexpr int log2BlockSize = 10; // 1024*/
/*constexpr int log2BlockSize = 9; // 512*/
/*constexpr int log2BlockSize = 8; // 256*/
constexpr int log2BlockSize = 7; // 128
/*constexpr int log2BlockSize = 6; // 64*/
constexpr int blockSize = 1 << log2BlockSize;
constexpr int log2BankSize = 5;
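// shared-memory indices are padded by one extra word every 2^log2BankSize entries so that the
// strided accesses of the tree-based scan fall into different banks (avoids bank conflicts)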
__device__ int conflictFreeIndex(int i) {
return i + (i >> log2BankSize);
}
__global__ void kernScanPerBlock(int *data, int *lastData) {
extern __shared__ int buffer[];
data += blockIdx.x * blockDim.x * 2;
int offset1 = conflictFreeIndex(threadIdx.x), offset2 = conflictFreeIndex(threadIdx.x + blockDim.x);
// copy data to shared memory
buffer[offset1] = data[threadIdx.x];
buffer[offset2] = data[threadIdx.x + blockDim.x];
int lastElem = 0;
if (lastData && threadIdx.x == blockDim.x - 1) {
lastElem = buffer[offset2];
}
__syncthreads();
// upward pass
for (int halfGap = 1; halfGap < blockDim.x; halfGap <<= 1) {
if (threadIdx.x < blockDim.x / halfGap) {
int
id1 = conflictFreeIndex((threadIdx.x * 2 + 1) * halfGap - 1),
id2 = conflictFreeIndex((threadIdx.x * 2 + 2) * halfGap - 1);
buffer[id2] += buffer[id1];
}
__syncthreads();
}
if (threadIdx.x == blockDim.x - 1) {
buffer[conflictFreeIndex(blockDim.x * 2 - 1)] = buffer[offset1];
buffer[offset1] = 0;
}
__syncthreads();
// downward pass
for (int halfGap = blockDim.x >> 1; halfGap >= 1; halfGap >>= 1) {
if (threadIdx.x < blockDim.x / halfGap) {
int prevIdx = (threadIdx.x * 2 + 1) * halfGap - 1;
int thisIdx = prevIdx + halfGap;
prevIdx = conflictFreeIndex(prevIdx);
thisIdx = conflictFreeIndex(thisIdx);
int sum = buffer[thisIdx] + buffer[prevIdx];
buffer[prevIdx] = buffer[thisIdx];
buffer[thisIdx] = sum;
}
__syncthreads();
}
// copy data back
data[threadIdx.x] = buffer[offset1];
data[threadIdx.x + blockDim.x] = buffer[offset2];
if (lastData && threadIdx.x == blockDim.x - 1) {
lastData[blockIdx.x] = lastElem + buffer[offset2];
}
}
__global__ void kernAddConstantToBlock(int *data, const int *amount) {
data[blockIdx.x * blockDim.x + threadIdx.x] += amount[blockIdx.x];
}
void _computeSizes(int n, int log2BlockSize, int *numBlocks, int *bufferSize) {
*numBlocks = n >> log2BlockSize;
if ((n & ((1 << log2BlockSize) - 1)) != 0) {
++*numBlocks;
}
*bufferSize = *numBlocks << log2BlockSize;
}
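// recursive work-efficient (Blelloch) scan: scan each block in shared memory while writing each
// block's total to an auxiliary buffer, scan that buffer recursively, then add the scanned
// totals back onto every block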
void dev_scan(int n, int *dev_data) {
assert((n & (blockSize - 1)) == 0);
if (n > blockSize) {
int numBlocks = n >> log2BlockSize, numIndirectBlocks, indirectSize;
_computeSizes(numBlocks, log2BlockSize, &numIndirectBlocks, &indirectSize);
int *buffer;
hipMalloc(&buffer, sizeof(int) * indirectSize);
hipLaunchKernelGGL(( kernScanPerBlock),
dim3(numBlocks), dim3(blockSize / 2), (blockSize + (blockSize >> log2BankSize)) * sizeof(int)
, 0, dev_data, buffer);
dev_scan(indirectSize, buffer);
hipLaunchKernelGGL(( kernAddConstantToBlock), dim3(numBlocks), dim3(blockSize), 0, 0, dev_data, buffer);
hipFree(buffer);
} else { // just scan the block
hipLaunchKernelGGL(( kernScanPerBlock),
dim3(1), dim3(blockSize / 2), (blockSize + (blockSize >> log2BankSize)) * sizeof(int)
, 0, dev_data, nullptr);
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int numBlocks, bufferSize;
_computeSizes(n, log2BlockSize, &numBlocks, &bufferSize);
int *buffer;
hipMalloc(&buffer, sizeof(int) * bufferSize);
hipMemcpy(buffer, idata, sizeof(int) * n, hipMemcpyHostToDevice);
// if integer overflow on the GPU were well-defined we would be able to get away without zeroing the rest
hipMemset(buffer + n, 0, sizeof(int) * (bufferSize - n));
timer().startGpuTimer();
dev_scan(bufferSize, buffer);
timer().endGpuTimer();
odata[0] = 0;
hipMemcpy(odata, buffer, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(buffer);
checkCUDAError("efficient scan");
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
constexpr int log2ScatterBlockSize = 6;
constexpr int scatterBlockSize = 1 << log2ScatterBlockSize;
int numBlocks, bufferSize;
_computeSizes(n, log2BlockSize, &numBlocks, &bufferSize);
int numScatterBlocks = (n + scatterBlockSize - 1) >> log2ScatterBlockSize;
int *data, *accum, *out;
hipMalloc(&data, sizeof(int) * bufferSize);
hipMalloc(&accum, sizeof(int) * bufferSize);
hipMalloc(&out, sizeof(int) * bufferSize);
hipMemcpy(data, idata, sizeof(int) * n, hipMemcpyHostToDevice);
timer().startGpuTimer();
hipLaunchKernelGGL(( Common::kernMapToBoolean), dim3(numBlocks), dim3(blockSize), 0, 0, n, accum, data);
dev_scan(bufferSize, accum);
hipLaunchKernelGGL(( Common::kernScatter), dim3(numScatterBlocks), dim3((1 << log2ScatterBlockSize)), 0, 0, n, out, data, data, accum);
timer().endGpuTimer();
int last = idata[n - 1] != 0 ? 1 : 0, res;
hipMemcpy(&res, accum + n - 1, sizeof(int), hipMemcpyDeviceToHost);
res += last;
hipMemcpy(odata, out, sizeof(int) * res, hipMemcpyDeviceToHost);
checkCUDAError("efficient compaction");
hipFree(data);
hipFree(accum);
hipFree(out);
return res;
}
__global__ void kernExtractBit(int n, int bit, int *odata, const int *idata) {
int iSelf = blockIdx.x * blockDim.x + threadIdx.x;
if (iSelf >= n) {
return;
}
odata[iSelf] = (idata[iSelf] & (1 << bit)) != 0 ? 1 : 0;
}
__global__ void kernNegate(int *odata, const int *idata) {
int iSelf = blockIdx.x * blockDim.x + threadIdx.x;
odata[iSelf] = idata[iSelf] == 0 ? 1 : 0;
}
__global__ void kernRadixSortScatter(
int n, int numFalses, int bit, int *odata, const int *idata, const int *trues, const int *falses
) {
int iSelf = blockIdx.x * blockDim.x + threadIdx.x;
if (iSelf >= n) {
return;
}
int value = idata[iSelf], index;
if ((value & (1 << bit)) != 0) {
index = trues[iSelf] + numFalses;
} else {
index = falses[iSelf];
}
odata[index] = value;
}
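// LSD radix sort built on the scan above: for each bit, exclusive-scan the set-bit flags and the
// negated flags to obtain stable destination indices (the split primitive), scatter, then swap buffers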
void radix_sort(int n, int *odata, const int *idata) {
constexpr int numIntBits = sizeof(int) * 8 - 1;
int numBlocks, bufferSize;
_computeSizes(n, log2BlockSize, &numBlocks, &bufferSize);
int *data1, *data2, *trues, *falses;
hipMalloc(&data1, sizeof(int) * n);
hipMalloc(&data2, sizeof(int) * n);
hipMalloc(&trues, sizeof(int) * bufferSize);
hipMalloc(&falses, sizeof(int) * bufferSize);
hipMemcpy(data1, idata, sizeof(int) * n, hipMemcpyHostToDevice);
timer().startGpuTimer();
for (int i = 0; i < numIntBits; ++i) {
hipLaunchKernelGGL(( kernExtractBit), dim3(numBlocks), dim3(blockSize), 0, 0, n, i, trues, data1);
hipLaunchKernelGGL(( kernNegate), dim3(numBlocks), dim3(blockSize), 0, 0, falses, trues);
dev_scan(bufferSize, trues);
dev_scan(bufferSize, falses);
int numFalses, lastElem;
hipMemcpy(&lastElem, data1 + (n - 1), sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&numFalses, falses + (n - 1), sizeof(int), hipMemcpyDeviceToHost);
if ((lastElem & (1 << i)) == 0) {
++numFalses;
}
hipLaunchKernelGGL(( kernRadixSortScatter), dim3(numBlocks), dim3(blockSize), 0, 0, n, numFalses, i, data2, data1, trues, falses);
std::swap(data1, data2);
}
timer().endGpuTimer();
hipMemcpy(odata, data1, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(data1);
hipFree(data2);
hipFree(trues);
hipFree(falses);
checkCUDAError("radix sort");
}
}
}
| 1370373bb31d8b48a4993247e84b9cc2aeacba38.cu | #include "efficient.h"
#include <cassert>
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer() {
static PerformanceTimer timer;
return timer;
}
// this many elements are processed by one block. cuda block size is half this
/*constexpr int log2BlockSize = 10; // 1024*/
/*constexpr int log2BlockSize = 9; // 512*/
/*constexpr int log2BlockSize = 8; // 256*/
constexpr int log2BlockSize = 7; // 128
/*constexpr int log2BlockSize = 6; // 64*/
constexpr int blockSize = 1 << log2BlockSize;
constexpr int log2BankSize = 5;
__device__ int conflictFreeIndex(int i) {
return i + (i >> log2BankSize);
}
__global__ void kernScanPerBlock(int *data, int *lastData) {
extern __shared__ int buffer[];
data += blockIdx.x * blockDim.x * 2;
int offset1 = conflictFreeIndex(threadIdx.x), offset2 = conflictFreeIndex(threadIdx.x + blockDim.x);
// copy data to shared memory
buffer[offset1] = data[threadIdx.x];
buffer[offset2] = data[threadIdx.x + blockDim.x];
int lastElem = 0;
if (lastData && threadIdx.x == blockDim.x - 1) {
lastElem = buffer[offset2];
}
__syncthreads();
// upward pass
for (int halfGap = 1; halfGap < blockDim.x; halfGap <<= 1) {
if (threadIdx.x < blockDim.x / halfGap) {
int
id1 = conflictFreeIndex((threadIdx.x * 2 + 1) * halfGap - 1),
id2 = conflictFreeIndex((threadIdx.x * 2 + 2) * halfGap - 1);
buffer[id2] += buffer[id1];
}
__syncthreads();
}
if (threadIdx.x == blockDim.x - 1) {
buffer[conflictFreeIndex(blockDim.x * 2 - 1)] = buffer[offset1];
buffer[offset1] = 0;
}
__syncthreads();
// downward pass
for (int halfGap = blockDim.x >> 1; halfGap >= 1; halfGap >>= 1) {
if (threadIdx.x < blockDim.x / halfGap) {
int prevIdx = (threadIdx.x * 2 + 1) * halfGap - 1;
int thisIdx = prevIdx + halfGap;
prevIdx = conflictFreeIndex(prevIdx);
thisIdx = conflictFreeIndex(thisIdx);
int sum = buffer[thisIdx] + buffer[prevIdx];
buffer[prevIdx] = buffer[thisIdx];
buffer[thisIdx] = sum;
}
__syncthreads();
}
// copy data back
data[threadIdx.x] = buffer[offset1];
data[threadIdx.x + blockDim.x] = buffer[offset2];
if (lastData && threadIdx.x == blockDim.x - 1) {
lastData[blockIdx.x] = lastElem + buffer[offset2];
}
}
__global__ void kernAddConstantToBlock(int *data, const int *amount) {
data[blockIdx.x * blockDim.x + threadIdx.x] += amount[blockIdx.x];
}
void _computeSizes(int n, int log2BlockSize, int *numBlocks, int *bufferSize) {
*numBlocks = n >> log2BlockSize;
if ((n & ((1 << log2BlockSize) - 1)) != 0) {
++*numBlocks;
}
*bufferSize = *numBlocks << log2BlockSize;
}
void dev_scan(int n, int *dev_data) {
assert((n & (blockSize - 1)) == 0);
if (n > blockSize) {
int numBlocks = n >> log2BlockSize, numIndirectBlocks, indirectSize;
_computeSizes(numBlocks, log2BlockSize, &numIndirectBlocks, &indirectSize);
int *buffer;
cudaMalloc(&buffer, sizeof(int) * indirectSize);
kernScanPerBlock<<<
numBlocks, blockSize / 2, (blockSize + (blockSize >> log2BankSize)) * sizeof(int)
>>>(dev_data, buffer);
dev_scan(indirectSize, buffer);
kernAddConstantToBlock<<<numBlocks, blockSize>>>(dev_data, buffer);
cudaFree(buffer);
} else { // just scan the block
kernScanPerBlock<<<
1, blockSize / 2, (blockSize + (blockSize >> log2BankSize)) * sizeof(int)
>>>(dev_data, nullptr);
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int numBlocks, bufferSize;
_computeSizes(n, log2BlockSize, &numBlocks, &bufferSize);
int *buffer;
cudaMalloc(&buffer, sizeof(int) * bufferSize);
cudaMemcpy(buffer, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
// if integer overflow on the GPU were well-defined we would be able to get away without zeroing the rest
cudaMemset(buffer + n, 0, sizeof(int) * (bufferSize - n));
timer().startGpuTimer();
dev_scan(bufferSize, buffer);
timer().endGpuTimer();
odata[0] = 0;
cudaMemcpy(odata, buffer, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(buffer);
checkCUDAError("efficient scan");
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
constexpr int log2ScatterBlockSize = 6;
constexpr int scatterBlockSize = 1 << log2ScatterBlockSize;
int numBlocks, bufferSize;
_computeSizes(n, log2BlockSize, &numBlocks, &bufferSize);
int numScatterBlocks = (n + scatterBlockSize - 1) >> log2ScatterBlockSize;
int *data, *accum, *out;
cudaMalloc(&data, sizeof(int) * bufferSize);
cudaMalloc(&accum, sizeof(int) * bufferSize);
cudaMalloc(&out, sizeof(int) * bufferSize);
cudaMemcpy(data, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
timer().startGpuTimer();
Common::kernMapToBoolean<<<numBlocks, blockSize>>>(n, accum, data);
dev_scan(bufferSize, accum);
Common::kernScatter<<<numScatterBlocks, (1 << log2ScatterBlockSize)>>>(n, out, data, data, accum);
timer().endGpuTimer();
int last = idata[n - 1] != 0 ? 1 : 0, res;
cudaMemcpy(&res, accum + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
res += last;
cudaMemcpy(odata, out, sizeof(int) * res, cudaMemcpyDeviceToHost);
checkCUDAError("efficient compaction");
cudaFree(data);
cudaFree(accum);
cudaFree(out);
return res;
}
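// For example, compact(5, odata, idata) with idata = {1, 0, 2, 0, 3} writes odata = {1, 2, 3}
// and returns 3: the exclusive scan of the boolean flags gives each surviving element its
// output slot, and the count is the last scanned value plus the flag of the last input element.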
__global__ void kernExtractBit(int n, int bit, int *odata, const int *idata) {
int iSelf = blockIdx.x * blockDim.x + threadIdx.x;
if (iSelf >= n) {
return;
}
odata[iSelf] = (idata[iSelf] & (1 << bit)) != 0 ? 1 : 0;
}
__global__ void kernNegate(int *odata, const int *idata) {
int iSelf = blockIdx.x * blockDim.x + threadIdx.x;
odata[iSelf] = idata[iSelf] == 0 ? 1 : 0;
}
__global__ void kernRadixSortScatter(
int n, int numFalses, int bit, int *odata, const int *idata, const int *trues, const int *falses
) {
int iSelf = blockIdx.x * blockDim.x + threadIdx.x;
if (iSelf >= n) {
return;
}
int value = idata[iSelf], index;
if ((value & (1 << bit)) != 0) {
index = trues[iSelf] + numFalses;
} else {
index = falses[iSelf];
}
odata[index] = value;
}
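// For example, splitting idata = {3, 1, 2, 4} on bit 0 scatters the elements with a zero bit
// ({2, 4}) to the front in their original order, followed by those with the bit set ({3, 1}),
// yielding {2, 4, 3, 1}; repeating this stable split for every bit sorts the array.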
void radix_sort(int n, int *odata, const int *idata) {
constexpr int numIntBits = sizeof(int) * 8 - 1;
int numBlocks, bufferSize;
_computeSizes(n, log2BlockSize, &numBlocks, &bufferSize);
int *data1, *data2, *trues, *falses;
cudaMalloc(&data1, sizeof(int) * n);
cudaMalloc(&data2, sizeof(int) * n);
cudaMalloc(&trues, sizeof(int) * bufferSize);
cudaMalloc(&falses, sizeof(int) * bufferSize);
cudaMemcpy(data1, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
timer().startGpuTimer();
for (int i = 0; i < numIntBits; ++i) {
kernExtractBit<<<numBlocks, blockSize>>>(n, i, trues, data1);
kernNegate<<<numBlocks, blockSize>>>(falses, trues);
dev_scan(bufferSize, trues);
dev_scan(bufferSize, falses);
int numFalses, lastElem;
cudaMemcpy(&lastElem, data1 + (n - 1), sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&numFalses, falses + (n - 1), sizeof(int), cudaMemcpyDeviceToHost);
if ((lastElem & (1 << i)) == 0) {
++numFalses;
}
kernRadixSortScatter<<<numBlocks, blockSize>>>(n, numFalses, i, data2, data1, trues, falses);
std::swap(data1, data2);
}
timer().endGpuTimer();
cudaMemcpy(odata, data1, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(data1);
cudaFree(data2);
cudaFree(trues);
cudaFree(falses);
checkCUDAError("radix sort");
}
}
}
|
39793d3ba9841ff863bce0db443a23ed6d635eb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/selectors/multi_pairwise.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <basic_types.h>
#include <texture.h>
#include <matrix_analysis.h>
#include <transpose.h>
#include <async_event.h>
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/binary_search.h> //lower_bound
#include <thrust/unique.h> //unique
#include <thrust/host_vector.h>
#include <cusp/detail/format_utils.h> //offsets_to_indices
#include <determinism_checker.h>
#include <solvers/solver.h>
#include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h>
#include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h>
#include <omp.h>
#define EXPERIMENTAL_ITERATIVE_MATCHING
namespace amgx
{
namespace aggregation
{
namespace multi_pairwise
{
// include common routines for all selectors
#include <aggregation/selectors/common_selector.h>
// ------------------------
// Kernels
// ------------------------
__device__
float random_weight2(int i, int j)
{
#define RAND_MULTIPLIER 1145637293
unsigned long i_min = (min(i, j) * RAND_MULTIPLIER);
unsigned long i_max = (max(i, j) * RAND_MULTIPLIER);
return ((float)i_min / i_max);
}
__device__
unsigned long random_weight3(int i, int j)
{
unsigned long a;
a = (i + j) ^ 8;
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) + (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a ^ 0xd3a2646c) + (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) + (a >> 16);
return a;
}
// findStrongestNeighbour kernel for block_dia_csr_matrix format
// Reads the weight from edge_weights array
template <typename IndexType, typename MatrixValueType>
__global__
void findStrongestNeighbourBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices,
MatrixValueType *edge_weights, IndexType num_block_rows, IndexType *aggregates,
IndexType *strongest_neighbour_1phase, IndexType *strongest_neighbour,
const size_t bsize, int phase, int merge_singletons)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
MatrixValueType weight;
int jcol;
while (tid < num_block_rows)
{
int strongest_unaggregated = -1;
int strongest_aggregated = -1;
MatrixValueType max_weight_unaggregated = 0.;
MatrixValueType max_weight_aggregated = 0.;
if (aggregates[tid] == -1) // Unaggregated row
{
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
jcol = column_indices[j];
if (phase == 1) { weight = edge_weights[j]; }
else { weight = random_weight2(tid, jcol); }
if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo
if (phase == 2 && strongest_neighbour_1phase[jcol] != tid) { continue; } // if 2nd phase only accept those who gave a hand on the 1st phase
// Identify strongest aggregated and unaggregated neighbours (method by multi_pairwise)
if (aggregates[jcol] == -1 && weight > 0.0 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && random_weight3(tid, jcol) > random_weight3(tid, strongest_unaggregated)))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (aggregates[jcol] != -1 && weight > 0.0 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && random_weight3(tid, jcol) > random_weight3(tid, strongest_aggregated)))) // aggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
{
if ( merge_singletons == 1 )
// Put in same aggregate as strongest neighbour
{
aggregates[tid] = aggregates[strongest_aggregated];
}
else
// create singleton
{
aggregates[tid] = tid;
}
}
else if (strongest_unaggregated != -1)
{
if (phase == 2)
{
MatrixValueType rand_w1 = random_weight2(tid, strongest_neighbour_1phase[tid]);
strongest_neighbour[tid] = max_weight_unaggregated > rand_w1 ? strongest_unaggregated : strongest_neighbour_1phase[tid];
}
else { strongest_neighbour_1phase[tid] = strongest_unaggregated; }
}
else
{
if (phase == 2) { strongest_neighbour[tid] = strongest_neighbour_1phase[tid]; }
else { strongest_neighbour_1phase[tid] = tid; }
}
}
tid += gridDim.x * blockDim.x;
}
}
template <typename IndexType>
__device__
bool atomicJoin( IndexType node, IndexType aggregate, IndexType *aggregates, int *sizes, int allowed )
{
int mySize = sizes[node];
int theirSize = sizes[aggregate];
int theirSizeOld = theirSize;
do
{
int newSize = mySize + theirSize;
if ( newSize > allowed )
{
return false;
}
theirSizeOld = theirSize;
theirSize = atomicCAS( &sizes[aggregate], theirSize, newSize );
}
while ( theirSize != theirSizeOld );
aggregates[node] = aggregate;
return true;
}
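// Note: atomicJoin() retries the compare-and-swap with the size value returned by atomicCAS,
// so concurrent joins into the same aggregate either succeed against the freshly observed size
// or give up once the combined size would exceed 'allowed'; aggregates[node] is only written
// after a successful CAS.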
template <typename IndexType, typename MatrixValueType, bool use_degree>
__global__
void findStrongestNeighbourBlockDiaCsr_V3(const IndexType *row_offsets,
const IndexType *column_indices,
MatrixValueType *edge_weights,
IndexType num_block_rows,
IndexType *aggregates,
IndexType *strongest_neighbour,
int *sizes,
int *degree,
const size_t bsize,
int max_aggregate_size,
int merge_singletons)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
MatrixValueType weight;
int jcol;
while (tid < num_block_rows)
{
int strongest_unaggregated = -1;
int strongest_aggregated = -1;
int lowest_degree;
if ( use_degree )
{
lowest_degree = degree[tid]; //only interested in finding lower degree than self
}
else
{
lowest_degree = 0; //if we decide not to use degree then just propose to the strongest edge
}
int lowest_degree_neighbor = tid;
MatrixValueType lowest_degree_weight = 1e100; //high start value, so that same degree neighbor won't become lowest degree neighbor
MatrixValueType max_weight_unaggregated = 0.;
MatrixValueType max_weight_aggregated = 0.;
int mySize;
if ( merge_singletons == 2 )
{
mySize = sizes[tid];
}
else
{
mySize = 0;
}
if ( merge_singletons != 2 )
{
max_aggregate_size = 100000;
}
//this aggregate is already full
if (mySize == max_aggregate_size)
{
aggregates[tid] = tid;
}
if (aggregates[tid] == -1) // Unaggregated row
{
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
jcol = column_indices[j];
if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo
weight = edge_weights[j];
if (weight <= 0.0) { continue; }
if ( aggregates[jcol] != -1 ) //aggregated neighbor
{
int theirSize;
if ( merge_singletons == 2 )
{
theirSize = sizes[aggregates[jcol]];
}
else
{
theirSize = 0;
}
//if all neighbors are aggregated, find the strongest edge to neighbor aggregate that is not full yet
if (mySize + theirSize <= max_aggregate_size &&
(weight > max_weight_aggregated)) // aggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
else //unaggregated neighbor
{
if ( use_degree && merge_singletons == 2 )
{
int theirSize = sizes[jcol];
//get lowest degree neighbor or find out that there is no lower degree neighbor
int current_degree = degree[jcol];
if ( mySize + theirSize <= max_aggregate_size && (current_degree < lowest_degree || current_degree == lowest_degree && weight > lowest_degree_weight) )
{
lowest_degree = current_degree;
lowest_degree_weight = weight;
lowest_degree_neighbor = jcol;
}
//get highest weight neighbor
if ( mySize + theirSize <= max_aggregate_size && (weight > max_weight_unaggregated) )
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
if ( use_degree && merge_singletons != 2 ) //same as above but ignore sizes
{
//get lowest degree neighbor or find out that there is no lower degree neighbor
int current_degree = degree[jcol];
if ( current_degree < lowest_degree || current_degree == lowest_degree && weight > lowest_degree_weight)
{
lowest_degree = current_degree;
lowest_degree_weight = weight;
lowest_degree_neighbor = jcol;
}
//get highest weight neighbor
if (weight > max_weight_unaggregated)
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
if ( !use_degree && merge_singletons == 2 )
{
//get highest weight neighbor only but pay attention to the aggregate sizes
int theirSize = sizes[jcol]; //get highest weight neighbor
if ( mySize + theirSize <= max_aggregate_size && (weight > max_weight_unaggregated) )
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
if ( !use_degree && merge_singletons != 2 )
{
//just highest weight
if (weight > max_weight_unaggregated)
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
}
}
//prefer lowest degree neighbor
if ( lowest_degree_neighbor != tid )
{
strongest_unaggregated = lowest_degree_neighbor;
}
if (strongest_unaggregated != -1) //Unaggregated neighbor exists
{
strongest_neighbour[tid] = strongest_unaggregated; //assign strongest aggregated
}
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated but small enough aggregated neighbors exist
{
if ( merge_singletons == 0 )
{
aggregates[tid] = tid;
}
if ( merge_singletons == 1 )
{
aggregates[tid] = aggregates[strongest_aggregated];
}
if ( merge_singletons == 2)
{
atomicJoin( tid, aggregates[strongest_aggregated], aggregates, sizes, max_aggregate_size ); //try to join, can fail. maybe it works next round.
}
}
if (strongest_unaggregated == -1 && strongest_aggregated == -1) //no feasible neighbor at all, become singleton
{
strongest_neighbour[tid] = tid; //become singleton
}
}
tid += gridDim.x * blockDim.x;
}
}
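// Note: this kernel and matchEdges() below form one handshaking round: each unaggregated row
// proposes to a single neighbour (preferring the lowest-degree one when use_degree is set,
// otherwise the strongest eligible edge), and only rows that propose to each other are merged
// into a pair; unmatched rows try again in the next iteration of the matching loop.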
template <typename IndexType, typename ValueType>
__global__
void computeDegree( const IndexType *ia, const IndexType *ja, const ValueType *weights, IndexType *aggregates, IndexType *sizes, IndexType *degree, IndexType numRows, IndexType max_aggregate_size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
while ( i < numRows )
{
int myDegree = 0;
IndexType ia_ip1 = ia[i + 1];
for ( IndexType ii = ia[i]; ii < ia_ip1; ii++ )
{
IndexType j = ja[ii];
if ( j == i )
{
continue;
}
int mySize, theirSize;
if ( sizes != NULL )
{
mySize = sizes[i];
theirSize = sizes[j];
}
else
{
mySize = theirSize = 0;
}
if ( weights[ii] > 0.0 && aggregates[j] == -1 && mySize + theirSize <= max_aggregate_size )
{
myDegree++;
}
}
degree[i] = myDegree;
i += gridDim.x * blockDim.x;
}
}
template <typename IndexType, typename ValueType>
__global__
void mergeSingletonsSmart(const IndexType *ia, const IndexType *ja, const ValueType *weights, IndexType *aggregates, IndexType *sizes, IndexType numRows, int max_aggregate_size)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < numRows )
{
//unaggregated nodes try to join or create their own aggregate
if ( aggregates[tid] == -1 )
{
bool joined = false;
while ( !joined )
{
int neighbor_aggregate = -1;
ValueType max_weight = 0.0;
IndexType mySize = sizes[tid];
for (IndexType ii = ia[tid]; ii < ia[tid + 1]; ii++)
{
IndexType j = ja[ii];
if (j == tid || j >= numRows) { continue; }
if ( aggregates[j] != -1 && sizes[aggregates[j]] + mySize <= max_aggregate_size && weights[ii] > max_weight )
{
neighbor_aggregate = aggregates[j];
max_weight = weights[ii];
}
}
//no possible neighbor found
if ( neighbor_aggregate == -1 )
{
//create own aggregate
aggregates[tid] = tid;
joined = true;
}
else
{
//try to join
joined = atomicJoin( tid, neighbor_aggregate, aggregates, sizes, max_aggregate_size );
}
}
}
tid += gridDim.x * blockDim.x;
}
}
template <typename IndexType>
__global__
void updateAggregateSizes( IndexType *sizesSource, IndexType *sizes, IndexType *aggregates, IndexType numRows )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < numRows )
{
IndexType agg = aggregates[tid];
IndexType aggregateSize = sizes[agg];
IndexType mySize = sizesSource[tid];
while ( mySize > aggregateSize )
{
aggregateSize = atomicCAS( &sizes[agg], aggregateSize, mySize );
}
tid += gridDim.x * blockDim.x;
}
}
// Kernel that checks if perfect matches exist
template <typename IndexType>
__global__
void matchEdges(const IndexType num_rows, IndexType *aggregates, int *strongest_neighbour, IndexType *sizes)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int potential_match, potential_match_neighbour;
while (tid < num_rows)
{
if (aggregates[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
potential_match_neighbour = strongest_neighbour[potential_match];
if ( potential_match == tid )
{
aggregates[tid] = tid;
}
else if (potential_match != -1 && potential_match_neighbour == tid && tid < potential_match) // we have a match
{
aggregates[tid] = tid;
aggregates[potential_match] = tid;
if ( sizes != NULL)
{
sizes[tid] += sizes[potential_match];
}
}
}
tid += gridDim.x * blockDim.x;
}
}
template <typename IndexType, int block_size>
__global__
void countAggregates(const IndexType num_rows, IndexType *aggregates, int *num_unaggregated)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int c = 0;
int i = tid;
while ( i < num_rows )
{
c += ( aggregates[i] == -1 );
i += gridDim.x * blockDim.x;
}
__shared__ volatile int smem[block_size];
smem[threadIdx.x] = c;
__syncthreads();
for ( int off = blockDim.x / 2; off >= 32; off = off / 2 )
{
if ( threadIdx.x < off )
{
smem[threadIdx.x] += smem[threadIdx.x + off];
}
__syncthreads();
}
// warp reduce
if ( threadIdx.x < 32 )
{
smem[threadIdx.x] += smem[threadIdx.x + 16];
smem[threadIdx.x] += smem[threadIdx.x + 8];
smem[threadIdx.x] += smem[threadIdx.x + 4];
smem[threadIdx.x] += smem[threadIdx.x + 2];
smem[threadIdx.x] += smem[threadIdx.x + 1];
}
if ( threadIdx.x == 0 )
{
atomicAdd(num_unaggregated, smem[0]);
}
}
template <typename IndexType>
__global__
void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, IndexType *aggregates_candidate)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while (tid < num_rows)
{
if (aggregates[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row
{
aggregates[tid] = aggregates_candidate[tid];
}
tid += gridDim.x * blockDim.x;
}
}
// Kernel that merges unaggregated vertices with their strongest aggregated neighbour
// Weights are read from edge_weights array
// For block_dia_csr_matrix_format
template <typename IndexType, typename MatrixValueType>
__global__
void mergeWithExistingAggregatesBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const MatrixValueType *edge_weights,
const int num_block_rows, IndexType *aggregates, int bsize, const int deterministic, IndexType *aggregates_candidate)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int jcol;
MatrixValueType weight;
while (tid < num_block_rows)
{
MatrixValueType max_weight_aggregated = 0.;
int strongest_aggregated = -1;
if (aggregates[tid] == -1) // Unaggregated row
{
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
// Compute edge weight
weight = edge_weights[j];
jcol = column_indices[j];
if (jcol == tid || jcol >= num_block_rows) { continue; } // skip diagonal
if ( aggregates[jcol] == num_block_rows ) { continue; } // skip dd rows
// Identify strongest aggregated neighbour
if (aggregates[jcol] != -1 && weight > 0 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && random_weight3( tid, jcol ) > random_weight3( tid, strongest_aggregated )))) //
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
if (strongest_aggregated != -1) // Found a neighbour to aggregate to
{
if (deterministic == 1)
{
aggregates_candidate[tid] = aggregates[strongest_aggregated];
}
else
{
// Put in same aggregate as strongest neighbour
aggregates[tid] = aggregates[strongest_aggregated];
}
}
else // All neighbours are unaggregated, leave alone
{
if (deterministic == 1)
{
aggregates_candidate[tid] = tid;
}
else
{
aggregates[tid] = tid;
}
}
}
tid += gridDim.x * blockDim.x;
}
}
// Kernel to extract diagonal for csr_matrix format
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernel(const IndexType *offsets, const IndexType *column_indices,
const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
int tIdx = threadIdx.x + blockDim.x * blockIdx.x;
while (tIdx < numRows)
{
const int offset = offsets[tIdx];
const int numj = offsets[tIdx + 1] - offset;
for (int j = offset; j < offset + numj; j++)
{
int jcol = column_indices[j];
if (tIdx == jcol)
{
diagonal[tIdx] = values[j];
}
}
tIdx += gridDim.x * blockDim.x;
}
}
// Kernel to extract diagonal for csr_matrix format
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernelNoDiaProp(const IndexType *dia_idx, const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
int tIdx = threadIdx.x + blockDim.x * blockIdx.x;
while (tIdx < numRows)
{
diagonal[tIdx] = values[dia_idx[tIdx]];
tIdx += gridDim.x * blockDim.x;
}
}
// filter edge weights like this:
// set w_ij = 0 iff
// w_ij < alpha * sqrt( max_k{w_ik} * max_l{w_jl} )
// alpha is some constant, 0.25 or 0.5 should work fine
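// For example, with alpha = 0.25, w_ij = 0.1, max_k{w_ik} = 1.0 and max_l{w_jl} = 0.64 the
// threshold is 0.25 * sqrt(1.0 * 0.64) = 0.2 > 0.1, so the edge is zeroed; the kernel below
// compares the squared form to avoid the square root.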
template<typename IndexType, typename ValueType>
__global__
void filterWeights( const IndexType *row_offsets, const IndexType *row_indices, const IndexType *col_indices, const IndexType *diag, const ValueType *old_weights, ValueType *new_weights, IndexType num_nonzero_blocks, IndexType num_owned, ValueType alpha )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int i, j, kmin, kmax;
ValueType max_ik, max_jl;
while ( tid < num_nonzero_blocks )
{
i = row_indices[tid];
j = col_indices[tid];
if ( i != j && j < num_owned )
{
//find max_k{w_ik}
kmin = row_offsets[i];
kmax = row_offsets[i + 1];
max_ik = 0.0;
for (int k = kmin; k < kmax; k++)
{
if ( col_indices[k] != i && old_weights[k] > max_ik )
{
max_ik = old_weights[k];
}
}
//find max_l{w_jl}
kmin = row_offsets[j];
kmax = row_offsets[j + 1];
max_jl = 0.0;
for (int l = kmin; l < kmax; l++)
{
if ( col_indices[l] != j && old_weights[l] > max_jl )
{
max_jl = old_weights[l];
}
}
//test squared inequality
if ( old_weights[tid] * old_weights[tid] < alpha * alpha * max_ik * max_jl )
{
new_weights[tid] = 0.0;
}
else //keep the weight; rescaling to relative importance is left commented out below
{
new_weights[tid] = old_weights[tid];
}
// new_weights[tid] = old_weights[tid] / sqrt(max_ik*max_jl);
}
tid += gridDim.x * blockDim.x;
}
}
template<typename IndexType, typename ValueType>
__global__
void gatherValuesInterleaved( const ValueType *inValues, ValueType *outValues, IndexType nnz, int sq_blocksize, int index_offset )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < nnz )
{
//at least the write is coalesced
outValues[tid] = inValues[tid * sq_blocksize + index_offset];
tid += gridDim.x * blockDim.x;
}
}
template<typename IndexType, typename ValueTypeV, typename ValueTypeM>
__global__
void addToWeights( ValueTypeM *edge_weights, const ValueTypeV *x, const IndexType *row_indices, IndexType *col_indices, IndexType nnz, double scale )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < nnz )
{
int i = row_indices[tid];
int j = col_indices[tid];
edge_weights[tid] -= static_cast<ValueTypeM>( scale * fabs( x[i] - x[j] ) );
tid += gridDim.x * blockDim.x;
}
}
template <typename ValueType, typename IndexType>
__global__
void rescaleVector( ValueType *x, IndexType numRows )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < numRows )
{
x[tid] = 2 * x[tid] - 1;
tid += gridDim.x * blockDim.x;
}
}
// -----------------
// Methods
// ----------------
// Constructor
template<class T_Config>
MultiPairwiseSelectorBase<T_Config>::MultiPairwiseSelectorBase(AMG_Config &cfg, const std::string &cfg_scope)
{
deterministic = cfg.AMG_Config::template getParameter<IndexType>("determinism_flag", "default");
max_iterations = cfg.AMG_Config::template getParameter<IndexType>("max_matching_iterations", cfg_scope);
numUnassigned_tol = cfg.AMG_Config::template getParameter<double>("max_unassigned_percentage", cfg_scope);
two_phase = cfg.AMG_Config::template getParameter<int>("handshaking_phases", cfg_scope) == 2;
m_aggregation_edge_weight_component = cfg.AMG_Config::template getParameter<int>("aggregation_edge_weight_component", cfg_scope);
aggregation_passes = cfg.AMG_Config::template getParameter<int>("aggregation_passes", cfg_scope); //defaults to size 8 aggregates. maybe it's more convenient to have that as a config parameter
filter_weights = cfg.AMG_Config::template getParameter<int>("filter_weights", cfg_scope); //by default: no filtering
filter_weights_alpha = cfg.AMG_Config::template getParameter<double>( "filter_weights_alpha", cfg_scope ); //default to 0.25
full_ghost_level = cfg.AMG_Config::template getParameter<int>( "full_ghost_level", cfg_scope ); //defaults to 0
notay_weights = cfg.AMG_Config::template getParameter<int>( "notay_weights", cfg_scope ); //defaults to 0
ghost_offdiag_limit = cfg.AMG_Config::template getParameter<int>( "ghost_offdiag_limit", cfg_scope ); //defaults to 0
merge_singletons = cfg.AMG_Config::template getParameter<int>( "merge_singletons", cfg_scope ); //defaults to 1
weight_formula = cfg.AMG_Config::template getParameter<int>( "weight_formula", cfg_scope ); //weight formula defaults to 0
serial_matching = cfg.AMG_Config::template getParameter<int>( "serial_matching", cfg_scope ) != 0; //will use a serial matching algorithm instead of handshake
modified_handshake = cfg.AMG_Config::template getParameter<int>("modified_handshake", cfg_scope ) == 1;
//passes = 1 -> max = 3
//passes = 2 -> max = 5
//passes = 3 -> max = 10
//passes = 4 -> max = 18
max_aggregate_size = 2;
for (int i = 1; i < aggregation_passes; i ++)
{
max_aggregate_size *= 2;
}
max_aggregate_size += aggregation_passes - (aggregation_passes / 2);
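// illustration: the loop yields 2^aggregation_passes and the correction adds
// aggregation_passes - aggregation_passes / 2 (integer division), e.g. 3 passes give
// 8 + 2 = 10 and 4 passes give 16 + 2 = 18, matching the table above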
mCfg = cfg;
mCfg_scope = cfg_scope;
}
// setAggregates for block_dia_csr_matrix_h format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(Matrix_h &A,
typename Matrix_h::IVector &aggregates, typename Matrix_h::IVector &aggregates_global, int &num_aggregates, MVector &edge_weights, IVector &sizes)
{
FatalError("MultiPairwise selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
// device specialization
//edge_weights is an in/out parameter:
//if its size is zero, the edge_weights will be computed from A and stored into edge_weights
//else the edge_weights will not be computed and are assumed to be valid for the given A. the value array of A is not used in this case
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(Matrix_d &A,
typename Matrix_d::IVector &aggregates,
typename Matrix_d::IVector &aggregates_global,
int &num_aggregates,
MVector &edge_weights,
IVector &sizes)
{
IndexType num_block_rows = (int) A.get_num_rows();
IndexType num_nonzero_blocks = (int) A.get_num_nz();
// both ways are supported
IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all();
typename Matrix_d::IVector &row_indices = A.row_indices;
row_indices.resize( total_nz);
cusp::detail::offsets_to_indices(A.row_offsets, row_indices);
IndexType total_rows = (A.is_matrix_singleGPU()) ? A.get_num_rows() : A.manager->num_rows_all();
aggregates.resize(total_rows);
thrust::fill(aggregates.begin(), aggregates.end(), -1);
cudaCheckError();
if ( this->merge_singletons == 2 && sizes.size() == 0 )
{
sizes.resize( total_rows, 1 ); //init with all ones
}
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_row_indices_ptr = row_indices.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
const ValueType *A_nonzero_values_ptr = A.values.raw();
typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1);
typename Matrix_d::IVector strongest_neighbour_1phase(num_block_rows, -1);
Vector<TemplateConfig<AMGX_device, AMGX_vecUInt, t_matPrec, t_indPrec> > aggregated(num_block_rows, 0);
IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw();
IndexType *aggregates_ptr = aggregates.raw();
const int threads_per_block = 256;
const int num_blocks = ::min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1 );
int numUnassigned = num_block_rows;
int numUnassigned_previous = numUnassigned;
bool computeWeights = ( edge_weights.size() == 0 );
if (computeWeights)
{
if ( A.hasProps( DIAG ) )
{
edge_weights.resize( num_nonzero_blocks + num_block_rows, 0.0 );
}
else
{
edge_weights.resize( num_nonzero_blocks + 1, -1 ); //+1 is important to some algorithms
}
}
ValueType *edge_weights_ptr = edge_weights.raw();
ValueType *rand_edge_weights_ptr = NULL;
hipStream_t str = thrust::global_thread_handle::get_stream();
// Compute the edge weights
if ( computeWeights )
{
const int num_blocks_V2 = ::min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
//compute with std formula
hipFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, ValueType>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( computeEdgeWeightsBlockDiaCsr_V2) , dim3(num_blocks_V2), dim3(threads_per_block), 0, str, A_row_offsets_ptr,
A_row_indices_ptr,
A_column_indices_ptr,
A_dia_idx_ptr,
A_nonzero_values_ptr,
num_nonzero_blocks,
edge_weights_ptr,
rand_edge_weights_ptr,
num_block_rows,
A.get_block_dimy(),
this->m_aggregation_edge_weight_component,
this->weight_formula);
cudaCheckError();
}
//filter weights if desired
if ( this->filter_weights == 1 )
{
MVector tmp( edge_weights.size() );
const int num_blocks_filter = ::min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
hipStreamSynchronize(str);
cudaCheckError();
hipLaunchKernelGGL(( filterWeights) , dim3(num_blocks_filter), dim3(threads_per_block), 0, str, A_row_offsets_ptr,
A_row_indices_ptr,
A_column_indices_ptr,
A_dia_idx_ptr,
edge_weights_ptr,
tmp.raw(),
num_nonzero_blocks,
num_block_rows,
this->filter_weights_alpha);
hipStreamSynchronize(str);
cudaCheckError();
tmp.swap( edge_weights );
edge_weights_ptr = edge_weights.raw();
}
// compute matching
if ( !this->serial_matching )
{
IVector degree;
if ( this->modified_handshake )
{
degree.resize( num_block_rows );
}
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
// TODO: allocate host pinned memory
AsyncEvent *throttle_event = new AsyncEvent;
throttle_event->create();
typename Matrix_h::IVector h_unagg_vec(1);
typename Matrix_d::IVector d_unagg_vec(1);
int *unaggregated = h_unagg_vec.raw();
int *d_unaggregated = d_unagg_vec.raw();
#endif
int icount, s = 1;
{
icount = 0;
ValueType *weights_ptr = edge_weights_ptr;
do
{
if ( !this->two_phase )
{
if ( this->modified_handshake )
hipLaunchKernelGGL(( computeDegree) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr,
A_column_indices_ptr,
weights_ptr,
aggregates_ptr,
sizes.raw(),
degree.raw(),
num_block_rows,
this->max_aggregate_size );
// 1-phase handshaking
if ( this->modified_handshake )
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V3<IndexType, ValueType, true>)
, dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr,
A_column_indices_ptr,
weights_ptr,
num_block_rows,
aggregates_ptr,
strongest_neighbour_ptr,
sizes.raw(),
degree.raw(),
A.get_block_dimy(),
this->max_aggregate_size,
this->merge_singletons);
else
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V3<IndexType, ValueType, false>)
, dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr,
A_column_indices_ptr,
weights_ptr,
num_block_rows,
aggregates_ptr,
strongest_neighbour_ptr,
sizes.raw(),
degree.raw(),
A.get_block_dimy(),
this->max_aggregate_size,
this->merge_singletons);
cudaCheckError();
}
else
{
// 2-phase handshaking
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 1, this->merge_singletons);
cudaCheckError();
// 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 2, this->merge_singletons);
cudaCheckError();
}
// Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
if ( this->merge_singletons == 2 )
{
hipLaunchKernelGGL(( matchEdges) , dim3(num_blocks), dim3(threads_per_block), 0, str, num_block_rows, aggregates_ptr, strongest_neighbour_ptr, sizes.raw());
}
else
{
hipLaunchKernelGGL(( matchEdges) , dim3(num_blocks), dim3(threads_per_block), 0, str, num_block_rows, aggregates_ptr, strongest_neighbour_ptr, (int *)NULL);
}
cudaCheckError();
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
s = (icount & 1);
if ( s == 0 )
{
// count unaggregated vertices
hipMemsetAsync(d_unaggregated, 0, sizeof(int), str);
hipLaunchKernelGGL(( countAggregates<IndexType, threads_per_block>) , dim3(num_blocks), dim3(threads_per_block), 0, str, num_block_rows, aggregates_ptr, d_unaggregated);
cudaCheckError();
hipMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), hipMemcpyDeviceToHost, str);
throttle_event->record(str);
}
else
{
throttle_event->sync();
numUnassigned_previous = numUnassigned;
numUnassigned = *unaggregated;
}
#else
hipStreamSynchronize(str);
numUnassigned_previous = numUnassigned;
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
#endif
icount++;
}
while ( (s == 0) || !(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous));
}
// printf("%i,\n", icount);
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
delete throttle_event;
#endif
}
else
{
computeMatchingSerialGreedy( A, aggregates, num_aggregates, edge_weights );
}
if ( this->merge_singletons == 1 )
{
// Merge remaining vertices with current aggregates
if (this->deterministic != 1)
{
while (numUnassigned != 0)
{
hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, (IndexType *) NULL);
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
}
}
else
{
typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1);
while (numUnassigned != 0)
{
hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, aggregates_candidate.raw());
cudaCheckError();
hipLaunchKernelGGL(( joinExistingAggregates) , dim3(num_blocks), dim3(threads_per_block), 0, str, num_block_rows, aggregates_ptr, aggregates_candidate.raw());
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
}
aggregates_candidate.resize(0);
}
}
else if (this->merge_singletons == 0 )
{
//make singletons
hipLaunchKernelGGL(( aggregateSingletons) , dim3(num_blocks), dim3(threads_per_block), 0, str, aggregates_ptr, num_block_rows );
cudaCheckError();
}
else if ( this->merge_singletons == 2 )
{
//merges all remaining singletons into adequate neighbors if possible
hipLaunchKernelGGL(( mergeSingletonsSmart) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr,
A_column_indices_ptr,
edge_weights_ptr,
aggregates_ptr,
sizes.raw(),
num_block_rows,
this->max_aggregate_size);
cudaCheckError();
}
//This will assign num_aggregates to the pseudo aggregate without counting it. Perfect!
this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates);
if ( this->merge_singletons == 2 )
{
//update the sizes vector, so it matches the renumbered aggregate indices
IVector sizesSource;
sizesSource.swap( sizes );
sizes.resize( num_aggregates, 1 );
hipLaunchKernelGGL(( updateAggregateSizes) , dim3(num_blocks), dim3(threads_per_block), 0, str, sizesSource.raw(), sizes.raw(), aggregates_ptr, num_block_rows );
cudaCheckError();
}
}
//instead of a handshake, we use a serial greedy algorithm to compute a better matching
//the algorithm:
// 1. compute degree of every node and sort nodes by degree into double linked list
// 2. while non-isolated nodes left:
// take node with minimum degree > 0
// find strongest edge to unaggregated node and assign to new aggregate
// remove both nodes from linked list
// decrease degree of each neighbor by one for each of the two nodes
// update list
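// In the implementation below the doubly linked list lives in the fwd/bwd arrays:
// indices 0..numRows-1 are the nodes and index numRows + d is the sentinel head of the
// bucket holding the unaggregated nodes of current degree d, so fwd[numRows + d] < numRows
// exactly when that bucket is non-empty.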
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeMatchingSerialGreedy( const Matrix_d &A, IVector &aggregates, int &numAggregates, MVector &edge_weights)
{
IndexType numRows = A.row_offsets.size() - 1;
IndexType nnz = A.col_indices.size();
//allocate memory on host
IndexType *ia = new IndexType[numRows + 1];
IndexType *ja = new IndexType[nnz];
ValueType *w = new ValueType[nnz];
IndexType *agg = new IndexType[numRows];
IndexType *deg = new IndexType[numRows];
//copy
hipMemcpy( ia, A.row_offsets.raw(), sizeof(IndexType) * (numRows + 1), hipMemcpyDeviceToHost );
hipMemcpy( ja, A.col_indices.raw(), sizeof(IndexType)*nnz, hipMemcpyDeviceToHost );
hipMemcpy( w, edge_weights.raw(), sizeof(ValueType)*nnz, hipMemcpyDeviceToHost );
//init agg and compute the degree of each aggregate
int max_degree = 0;
for (IndexType i = 0; i < numRows; i++)
{
agg[i] = -1;
int degree = 0;
for (IndexType ii = ia[i]; ii < ia[i + 1]; ii++) //only care for positive weights
{
if ( ja[ii] != i && w[ii] > 0.0 )
{
degree++;
}
}
if ( degree > max_degree )
{
max_degree = degree;
}
deg[i] = degree;
}
if ( max_degree >= numRows )
{
FatalError( "max degree is greater than numRows.", AMGX_ERR_UNKNOWN );
}
//init double linked list
IndexType *fwd = new IndexType[numRows + max_degree + 1];
IndexType *bwd = new IndexType[numRows + max_degree + 1];
for (IndexType i = 0; i < numRows + max_degree + 1; i++)
{
fwd[i] = i;
bwd[i] = i;
}
IndexType nodesLeft = numRows;
numAggregates = 0;
//insert nodes into list
for (IndexType i = numRows - 1; i >= 0; i--) //inserting in backward order the nodes will be sorted by index in case of same degree
{
//insert forward following root
fwd[i] = fwd[numRows + deg[i]];
fwd[numRows + deg[i]] = i;
//insert backward
bwd[i] = numRows + deg[i];
bwd[fwd[i]] = i;
//isolated nodes cannot be aggregated
if ( deg[i] == 0 )
{
nodesLeft--;
}
}
while ( nodesLeft > 0 )
{
IndexType node = numRows;
int degree;
for (degree = 1; degree <= max_degree; degree++)
{
//list not empty -> select node
if ( fwd[numRows + degree] < numRows ) //selecting the first node will select the most recently inserted one or the one with lowest index. both are preferable
{
node = fwd[numRows + degree];
}
if ( node < numRows )
{
break;
}
}
//no node with degree > 1 found even though nodesLeft > 0
if ( node == numRows )
{
FatalError("nodeLeft counting or list invalid", AMGX_ERR_UNKNOWN );
}
if ( agg[node] != -1 )
{
FatalError("node is already aggregated", AMGX_ERR_UNKNOWN );
}
//find strongest edge
ValueType max_weight = 0.0;
IndexType max_node = numRows; //use this as gatekeeper, so if weight == 0 the node index will not be greater than this
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
{
IndexType j = ja[ii];
if ( agg[j] != -1 || j == node)
{
continue;
}
if ( w[ii] > 0.0 )
{
degree--;
}
//deterministic, doesn't select 0 weight.
if ( w[ii] > max_weight || (w[ii] == max_weight && j > max_node) ) //always taking the edge pointing to the max node can give good alignment if numbering is structured
{
max_node = j;
max_weight = w[ii];
}
} //Note that there has to be at least one neighbor node because degree of node is at least 1.
if ( max_node == numRows )
{
FatalError( "node has no neighbor although degree of node is at least 1", AMGX_ERR_UNKNOWN );
}
if ( degree != 0 )
{
FatalError( "node degree corrupted", AMGX_ERR_UNKNOWN );
}
//aggregate
agg[node] = node;
agg[max_node] = node;
numAggregates++;
nodesLeft -= 2;
//remove from list
fwd[bwd[node]] = fwd[node];
bwd[fwd[node]] = bwd[node];
fwd[bwd[max_node]] = fwd[max_node];
bwd[fwd[max_node]] = bwd[max_node];
//update neighbors and list
//max_node first
for (IndexType ii = ia[max_node]; ii < ia[max_node + 1]; ii++)
{
IndexType j = ja[ii];
if ( agg[j] != -1 || w[ii] <= 0.0)
{
continue;
}
//remove j from list
fwd[bwd[j]] = fwd[j];
bwd[fwd[j]] = bwd[j];
//update degree of j
deg[j]--;
//add j back to start of the list
fwd[j] = fwd[numRows + deg[j]];
bwd[j] = numRows + deg[j];
bwd[fwd[j]] = j;
fwd[bwd[j]] = j;
if (deg[j] == 0)
{
nodesLeft--;
}
}
//node second, this will prefer node's neighbors over max_node's neighbors when choosing the next node
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
{
IndexType j = ja[ii];
if ( agg[j] != -1 || w[ii] <= 0.0)
{
continue;
}
//remove j from list
fwd[bwd[j]] = fwd[j];
bwd[fwd[j]] = bwd[j];
//update degree of j
deg[j]--;
//add j back to start of the list
fwd[j] = fwd[numRows + deg[j]];
bwd[j] = numRows + deg[j];
bwd[fwd[j]] = j;
fwd[bwd[j]] = j;
if (deg[j] == 0)
{
nodesLeft--;
}
}
}
//copy result back to device
hipMemcpy( aggregates.raw(), agg, sizeof(IndexType)*numRows, hipMemcpyHostToDevice );
//assert matching
for (IndexType node = 0; node < numRows; node++)
{
if ( agg[node] == -1 )
{
continue;
}
for ( IndexType partner = 0; partner < numRows; partner++)
{
if ( agg[partner] == agg[node] )
{
if ( partner == node )
{
continue;
}
bool neighbor = false;
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
if ( ja[ii] == partner )
{
neighbor = true;
break;
}
if ( !neighbor )
{
for (IndexType ii = ia[partner]; ii < ia[partner + 1]; ii++)
if ( ja[ii] == node )
{
neighbor = true;
break;
}
}
if ( !neighbor )
{
FatalError("Internal error in aggregation selector", AMGX_ERR_INTERNAL);
}
}
}
}
//you shall not leak memory
delete[] ia;
delete[] ja;
delete[] w;
delete[] agg;
delete[] deg;
delete[] fwd;
delete[] bwd;
}
//this kernel merges aggregate2 into aggregate1
template<typename IndexType>
__global__
void mergeAggregates(IndexType *aggregate1, const IndexType *aggregate2, IndexType sizeAggregate1, IndexType sizeAggregate2, IndexType sizeAggregate3)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < sizeAggregate1 )
{
if ( aggregate1[tid] == sizeAggregate2 )
{
aggregate1[tid] = sizeAggregate3;
}
else
{
aggregate1[tid] = aggregate2[aggregate1[tid]];
}
tid += gridDim.x * blockDim.x;
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeIncompleteGalerkin( const Matrix_h &A,
Matrix_h &Ac,
const typename Matrix_h::IVector &aggregates,
const typename Matrix_h::IVector &R_row_offsets,
const typename Matrix_h::IVector &R_column_indices,
const int num_aggregates )
{
FatalError("computeIncompleteGalerkin is not supported on host. Run with ghost_offdiag_limit=0 instead.", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeIncompleteGalerkin( const Matrix_d &A,
Matrix_d &Ac,
const typename Matrix_d::IVector &aggregates,
const typename Matrix_d::IVector &R_row_offsets,
const typename Matrix_d::IVector &R_column_indices,
const int num_aggregates )
{
FatalError("computeIncompleteGalerkin is not implemented yet. Run with ghost_offdiag_limit=0 instead.", AMGX_ERR_NOT_IMPLEMENTED);
}
template<class TConfig>
void MultiPairwiseSelectorBase<TConfig>::assertRestriction( const IVector &R_row_offsets, const IVector &R_col_indices, const IVector &aggregates )
{
int *r_ia = new int[R_row_offsets.size()];
int *r_ja = new int[R_col_indices.size()];
int *agg = new int[aggregates.size()];
int *used_col = new int[aggregates.size()];
for ( int i = 0; i < aggregates.size(); i++ )
{
used_col[i] = 0;
}
hipMemcpy( r_ia, R_row_offsets.raw(), sizeof(int)*R_row_offsets.size(), hipMemcpyDeviceToHost );
hipMemcpy( r_ja, R_col_indices.raw(), sizeof(int)*R_col_indices.size(), hipMemcpyDeviceToHost );
hipMemcpy( agg, aggregates.raw(), sizeof(int)*aggregates.size(), hipMemcpyDeviceToHost );
for ( int i = 0; i < R_row_offsets.size() - 1; i++ )
{
for ( int ii = r_ia[i]; ii < r_ia[i + 1]; ii++ )
{
int j = r_ja[ii];
used_col[j]++;
if ( used_col[j] > 1 )
{
std::cout << "column " << j << " is present at least " << used_col[j] << " times" << std::endl;
}
if ( j < 0 || j >= aggregates.size() )
{
std::cout << "Error: j out of bounds, j = " << j << " and numRows = " << aggregates.size() << std::endl;
}
else if ( agg[j] != i )
{
std::cout << "Error: agg[" << j << "] = " << agg[j] << " != " << i << std::endl;
}
}
}
std::cout << "assert restriction done" << std::endl;
}
template<class T_Config>
void MultiPairwiseSelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A,
IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
if (A.get_block_dimx() == A.get_block_dimy())
{
//ghost level matrix. this is probably a weight matrix
Matrix<TConfig> ghostA;
ghostA.values.resize(0);
//prolongation and restriction operator. this is only needed when LowDegCoarseAGenerator is used
IVector R_row_offsets;
IVector R_col_indices;
//holds the size of each aggregate
IVector sizes;
sizes.resize(0);
//aggregates for ghost level
IVector aggregates_current;
IVector aggregates_global_current;
bool aggregates_initialized = true;
if (aggregates.size() == 0)
{
aggregates_initialized = false;
if (!A.is_matrix_singleGPU())
{
aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors()));
}
else
{
aggregates.resize(A.get_num_rows());
}
}
//for mergeAggregates kernel
const int threads_per_block = 256;
const int num_blocks = ::min( AMGX_GRID_MAX_SIZE, (A.get_num_rows() - 1) / threads_per_block + 1 );
hipStream_t stream = thrust::global_thread_handle::get_stream();
//initialize and prepare weight matrix
Matrix<TConfig> w;
w.set_initialized(0);
w.addProps(CSR);
w.delProps(COO);
w.setColsReorderedByColor(false);
w.resize( 0, 0, 0, 1, 1, true ); //empty scalar 0x0 matrix
w.values.resize(0); //matrix resize sets the values array to nnz+1 for no apparent reason
IndexType targetSize = 1;
//initialize coarse A generator
CoarseAGenerator<TConfig> *cag;
const bool use_restriction = true;
const bool shrink_ghost_level = false;
cag = new LowDegCoarseAGenerator<TConfig>(mCfg, mCfg_scope);
// This will make coarseAGenerator allocate more memory inside of galerkin
ghostA.manager = new DistributedManager<TConfig>();
w.manager = new DistributedManager<TConfig>();
Matrix<TConfig> *curA = &A;
//foreach pass do:
// 1. build aggregates and weights
// 2. create weight matrix (in full_ghost_level mode this is the input matrix or the last ghostlevel matrix)
// 3. if in full ghost level mode, build R
// 4. compute next level
for (int current_pass = 1; true; current_pass++)
{
const IndexType numRows = curA->get_num_rows();
const IndexType nnz = curA->get_num_nz();
targetSize *= 2;
if ( full_ghost_level )
{
w.values.resize(0); //compute weights from curA
}
else
{
w.values.swap( ghostA.values ); //use the weights computed with the galerkin operator (this will do nothing in the first pass, both values have size 0)
}
// create aggregates from correct input matrix
setAggregates_common_sqblocks( *curA, aggregates_current, aggregates_global_current, num_aggregates, w.values, sizes );
if ( current_pass > 1 )
{
//merge original aggregate with the newly created ones
hipLaunchKernelGGL(( mergeAggregates) , dim3(num_blocks), dim3(threads_per_block), 0, stream , aggregates.raw(), aggregates_current.raw(), A.get_num_rows(), numRows, num_aggregates );
cudaCheckError();
//mergeAggregates<<< num_blocks, threads_per_block, 0, stream >>>( aggregates_global.raw(), aggregates_global_current.raw(), A.get_num_rows() );
//cudaCheckError();
}
//try to free memory
if ( full_ghost_level )
{
//then we don't need to save the weights, only for original level to do post processing
w.values.resize(0);
}
else
{
//save edge weights for original level later
//in that case we can throw away the values of ghostA as we will use the values to compute the next ghost level
ghostA.values.resize(0);
}
// this is the break condition for the loop
if ( current_pass >= aggregation_passes || num_aggregates <= 1 || num_aggregates == numRows)
{
//this means, aggregates has not been initialized yet
if ( !aggregates_initialized )
{
aggregates.swap( aggregates_current );
}
if ( !aggregates_initialized )
{
aggregates_global.swap( aggregates_global_current );
}
hipStreamSynchronize( stream );
cudaCheckError();
break;
}
//prepare A to be corrupted
curA->set_initialized(0);
//swap in ia, ja from curA
w.row_offsets.swap( curA->row_offsets );
w.col_indices.swap( curA->col_indices );
if ( full_ghost_level )
{
if ( shrink_ghost_level && curA->get_block_dimx() > 1)
{
//set w to correct size
w.values.resize( nnz );
//define grid and offsets
const int num_blocks_inter = ::min( (int)AMGX_GRID_MAX_SIZE, (int)(nnz - 1) / threads_per_block + 1 );
const int sq_blocksize = A.get_block_dimx() * A.get_block_dimy();
const int index_offset = A.get_block_dimy() * m_aggregation_edge_weight_component + m_aggregation_edge_weight_component;
//do the interleaved copy
hipLaunchKernelGGL(( gatherValuesInterleaved) , dim3(num_blocks_inter), dim3(threads_per_block), 0, stream, A.values.raw(), w.values.raw(), nnz, sq_blocksize, index_offset );
hipStreamSynchronize( stream );
cudaCheckError();
}
else
{
w.values.swap( curA->values );
}
}
w.diag.swap( curA->diag );
//resize to inform the matrix of its new size
if ( full_ghost_level && !shrink_ghost_level )
{
w.set_block_dimx( A.get_block_dimx() );
w.set_block_dimy( A.get_block_dimy() );
}
else
{
w.set_block_dimx( 1 );
w.set_block_dimy( 1 );
}
w.set_num_rows( numRows );
w.set_num_cols( numRows );
w.set_num_nz( nnz );
w.set_allow_recompute_diag( false );
if ( curA->hasProps( DIAG ) )
{
w.addProps( DIAG );
}
//ready to use
w.set_initialized(1);
//compute restriction operator
if ( use_restriction )
{
IVector R_row_indices(aggregates_current);
R_row_offsets.resize(num_aggregates + 2);
R_col_indices.resize(numRows);
thrust::sequence(R_col_indices.begin(), R_col_indices.end());
cudaCheckError();
thrust::sort_by_key(R_row_indices.begin(), R_row_indices.end(), R_col_indices.begin());
cudaCheckError();
cusp::detail::indices_to_offsets(R_row_indices, R_row_offsets);
cudaCheckError();
//delete last row, which holds the pseudo aggregate
R_row_offsets.resize( num_aggregates + 1);
R_col_indices.resize( R_row_offsets[num_aggregates] );
}
// 3. compute galerkin ghost level
if ( ghost_offdiag_limit == 0 )
{
//compute full galerkin
cag->computeAOperator(w,
ghostA,
aggregates_current,
R_row_offsets,
R_col_indices,
num_aggregates);
}
else
{
//compute incomplete galerkin
computeIncompleteGalerkin(w,
ghostA,
aggregates_current,
R_row_offsets,
R_col_indices,
num_aggregates);
}
//from now on w will be destroyed again.
w.set_initialized(0);
//repair the original A matrix. its ia and ja are in w
if ( current_pass == 1 )
{
//swap back
w.row_offsets.swap( A.row_offsets );
w.col_indices.swap( A.col_indices );
//only in that case we have swapped the values
if ( full_ghost_level && !shrink_ghost_level )
{
w.values.swap( A.values );
}
//save the edge weights of the original level
A.diag.swap( w.diag );
A.set_initialized(1); //A is repaired now
//save the first aggregates into the original aggregate vector so we can merge them later
aggregates.swap( aggregates_current );
aggregates_global.swap( aggregates_global_current );
aggregates_initialized = true;
curA = &ghostA;
}
}
delete cag;
}
else
{
FatalError("Unsupported block size for MultiPairwise", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
}
// -------------------------
// Explicit instantiations
// -------------------------
#define AMGX_CASE_LINE(CASE) template class MultiPairwiseSelectorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class MultiPairwiseSelector<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
}
| 39793d3ba9841ff863bce0db443a23ed6d635eb0.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/selectors/multi_pairwise.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <basic_types.h>
#include <texture.h>
#include <matrix_analysis.h>
#include <transpose.h>
#include <async_event.h>
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/binary_search.h> //lower_bound
#include <thrust/unique.h> //unique
#include <thrust/host_vector.h>
#include <cusp/detail/format_utils.h> //offsets_to_indices
#include <determinism_checker.h>
#include <solvers/solver.h>
#include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h>
#include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h>
#include <omp.h>
#define EXPERIMENTAL_ITERATIVE_MATCHING
namespace amgx
{
namespace aggregation
{
namespace multi_pairwise
{
// include common routines for all selectors
#include <aggregation/selectors/common_selector.h>
// ------------------------
// Kernels
// ------------------------
__device__
float random_weight2(int i, int j)
{
#define RAND_MULTIPLIER 1145637293
unsigned long i_min = (min(i, j) * RAND_MULTIPLIER);
unsigned long i_max = (max(i, j) * RAND_MULTIPLIER);
return ((float)i_min / i_max);
}
__device__
unsigned long random_weight3(int i, int j)
{
unsigned long a;
a = (i + j) ^ 8;
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) + (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a ^ 0xd3a2646c) + (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) + (a >> 16);
return a;
}
// findStrongestNeighbour kernel for block_dia_csr_matrix format
// Reads the weight from edge_weights array
template <typename IndexType, typename MatrixValueType>
__global__
void findStrongestNeighbourBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices,
MatrixValueType *edge_weights, IndexType num_block_rows, IndexType *aggregates,
IndexType *strongest_neighbour_1phase, IndexType *strongest_neighbour,
const size_t bsize, int phase, int merge_singletons)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
MatrixValueType weight;
int jcol;
while (tid < num_block_rows)
{
int strongest_unaggregated = -1;
int strongest_aggregated = -1;
MatrixValueType max_weight_unaggregated = 0.;
MatrixValueType max_weight_aggregated = 0.;
if (aggregates[tid] == -1) // Unaggregated row
{
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
jcol = column_indices[j];
if (phase == 1) { weight = edge_weights[j]; }
else { weight = random_weight2(tid, jcol); }
if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo
if (phase == 2 && strongest_neighbour_1phase[jcol] != tid) { continue; } // if 2nd phase only accept those who gave a hand on the 1st phase
// Identify strongest aggregated and unaggregated neighbours (method by multi_pairwise)
if (aggregates[jcol] == -1 && weight > 0.0 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && random_weight3(tid, jcol) > random_weight3(tid, strongest_unaggregated)))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (aggregates[jcol] != -1 && weight > 0.0 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && random_weight3(tid, jcol) > random_weight3(tid, strongest_aggregated)))) // aggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
{
if ( merge_singletons == 1 )
// Put in same aggregate as strongest neighbour
{
aggregates[tid] = aggregates[strongest_aggregated];
}
else
// create singleton
{
aggregates[tid] = tid;
}
}
else if (strongest_unaggregated != -1)
{
if (phase == 2)
{
MatrixValueType rand_w1 = random_weight2(tid, strongest_neighbour_1phase[tid]);
strongest_neighbour[tid] = max_weight_unaggregated > rand_w1 ? strongest_unaggregated : strongest_neighbour_1phase[tid];
}
else { strongest_neighbour_1phase[tid] = strongest_unaggregated; }
}
else
{
if (phase == 2) { strongest_neighbour[tid] = strongest_neighbour_1phase[tid]; }
else { strongest_neighbour_1phase[tid] = tid; }
}
}
tid += gridDim.x * blockDim.x;
}
}
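// atomicJoin: tries to add 'node' to 'aggregate' by growing sizes[aggregate] with a CAS
// loop. The join is rejected (returns false) if the combined size would exceed 'allowed';
// otherwise aggregates[node] is updated and the function returns true.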
template <typename IndexType>
__device__
bool atomicJoin( IndexType node, IndexType aggregate, IndexType *aggregates, int *sizes, int allowed )
{
int mySize = sizes[node];
int theirSize = sizes[aggregate];
int theirSizeOld = theirSize;
do
{
int newSize = mySize + theirSize;
if ( newSize > allowed )
{
return false;
}
theirSizeOld = theirSize;
theirSize = atomicCAS( &sizes[aggregate], theirSize, newSize );
}
while ( theirSize != theirSizeOld );
aggregates[node] = aggregate;
return true;
}
template <typename IndexType, typename MatrixValueType, bool use_degree>
__global__
void findStrongestNeighbourBlockDiaCsr_V3(const IndexType *row_offsets,
const IndexType *column_indices,
MatrixValueType *edge_weights,
IndexType num_block_rows,
IndexType *aggregates,
IndexType *strongest_neighbour,
int *sizes,
int *degree,
const size_t bsize,
int max_aggregate_size,
int merge_singletons)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
MatrixValueType weight;
int jcol;
while (tid < num_block_rows)
{
int strongest_unaggregated = -1;
int strongest_aggregated = -1;
int lowest_degree;
if ( use_degree )
{
lowest_degree = degree[tid]; //only interested in finding lower degree than self
}
else
{
lowest_degree = 0; //if we decide not to use degree, then just propose to the strongest edge
}
int lowest_degree_neighbor = tid;
MatrixValueType lowest_degree_weight = 1e100; //high start value, so that same degree neighbor won't become lowest degree neighbor
MatrixValueType max_weight_unaggregated = 0.;
MatrixValueType max_weight_aggregated = 0.;
int mySize;
if ( merge_singletons == 2 )
{
mySize = sizes[tid];
}
else
{
mySize = 0;
}
if ( merge_singletons != 2 )
{
max_aggregate_size = 100000;
}
//this aggregate is already full
if (mySize == max_aggregate_size)
{
aggregates[tid] = tid;
}
if (aggregates[tid] == -1) // Unaggregated row
{
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
jcol = column_indices[j];
if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo
weight = edge_weights[j];
if (weight <= 0.0) { continue; }
if ( aggregates[jcol] != -1 ) //aggregated neighbor
{
int theirSize;
if ( merge_singletons == 2 )
{
theirSize = sizes[aggregates[jcol]];
}
else
{
theirSize = 0;
}
//if all neighbors are aggregated, find the strongest edge to neighbor aggregate that is not full yet
if (mySize + theirSize <= max_aggregate_size &&
(weight > max_weight_aggregated)) // aggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
else //unaggregated neighbor
{
if ( use_degree && merge_singletons == 2 )
{
int theirSize = sizes[jcol];
//get lowest degree neighbor or find out that there is no lower degree neighbor
int current_degree = degree[jcol];
if ( mySize + theirSize <= max_aggregate_size && (current_degree < lowest_degree || (current_degree == lowest_degree && weight > lowest_degree_weight)) )
{
lowest_degree = current_degree;
lowest_degree_weight = weight;
lowest_degree_neighbor = jcol;
}
//get highest weight neighbor
if ( mySize + theirSize <= max_aggregate_size && (weight > max_weight_unaggregated) )
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
if ( use_degree && merge_singletons != 2 ) //same as above but ignore sizes
{
//get lowest degree neighbor or find out that there is no lower degree neighbor
int current_degree = degree[jcol];
if ( current_degree < lowest_degree || (current_degree == lowest_degree && weight > lowest_degree_weight) )
{
lowest_degree = current_degree;
lowest_degree_weight = weight;
lowest_degree_neighbor = jcol;
}
//get highest weight neighbor
if (weight > max_weight_unaggregated)
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
if ( !use_degree && merge_singletons == 2 )
{
//get highest weight neighbor only but pay attention to the aggregate sizes
int theirSize = sizes[jcol]; //get highest weight neighbor
if ( mySize + theirSize <= max_aggregate_size && (weight > max_weight_unaggregated) )
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
if ( !use_degree && merge_singletons != 2 )
{
//just highest weight
if (weight > max_weight_unaggregated)
{
strongest_unaggregated = jcol;
max_weight_unaggregated = weight;
}
}
}
}
//prefer lowest degree neighbor
if ( lowest_degree_neighbor != tid )
{
strongest_unaggregated = lowest_degree_neighbor;
}
if (strongest_unaggregated != -1) //Unaggregated neighbor exists
{
strongest_neighbour[tid] = strongest_unaggregated; //assign strongest aggregated
}
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated but small enough aggregated neighbors exist
{
if ( merge_singletons == 0 )
{
aggregates[tid] = tid;
}
if ( merge_singletons == 1 )
{
aggregates[tid] = aggregates[strongest_aggregated];
}
if ( merge_singletons == 2)
{
atomicJoin( tid, aggregates[strongest_aggregated], aggregates, sizes, max_aggregate_size ); //try to join, can fail. maybe it works next round.
}
}
if (strongest_unaggregated == -1 && strongest_aggregated == -1) //no feasible neighbor at all, become singleton
{
strongest_neighbour[tid] = tid; //become singleton
}
}
tid += gridDim.x * blockDim.x;
}
}
template <typename IndexType, typename ValueType>
__global__
void computeDegree( const IndexType *ia, const IndexType *ja, const ValueType *weights, IndexType *aggregates, IndexType *sizes, IndexType *degree, IndexType numRows, IndexType max_aggregate_size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
while ( i < numRows )
{
int myDegree = 0;
IndexType ia_ip1 = ia[i + 1];
for ( IndexType ii = ia[i]; ii < ia_ip1; ii++ )
{
IndexType j = ja[ii];
if ( j == i )
{
continue;
}
int mySize, theirSize;
if ( sizes != NULL )
{
mySize = sizes[i];
theirSize = sizes[j];
}
else
{
mySize = theirSize = 0;
}
if ( weights[ii] > 0.0 && aggregates[j] == -1 && mySize + theirSize <= max_aggregate_size )
{
myDegree++;
}
}
degree[i] = myDegree;
i += gridDim.x * blockDim.x;
}
}
template <typename IndexType, typename ValueType>
__global__
void mergeSingletonsSmart(const IndexType *ia, const IndexType *ja, const ValueType *weights, IndexType *aggregates, IndexType *sizes, IndexType numRows, int max_aggregate_size)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < numRows )
{
//unaggregated nodes try to join or create their own aggregate
if ( aggregates[tid] == -1 )
{
bool joined = false;
while ( !joined )
{
int neighbor_aggregate = -1;
ValueType max_weight = 0.0;
IndexType mySize = sizes[tid];
for (IndexType ii = ia[tid]; ii < ia[tid + 1]; ii++)
{
IndexType j = ja[ii];
if (j == tid || j >= numRows) { continue; }
if ( aggregates[j] != -1 && sizes[aggregates[j]] + mySize <= max_aggregate_size && weights[ii] > max_weight )
{
neighbor_aggregate = aggregates[j];
max_weight = weights[ii];
}
}
//no possible neighbor found
if ( neighbor_aggregate == -1 )
{
//create own aggregate
aggregates[tid] = tid;
joined = true;
}
else
{
//try to join
joined = atomicJoin( tid, neighbor_aggregate, aggregates, sizes, max_aggregate_size );
}
}
}
tid += gridDim.x * blockDim.x;
}
}
template <typename IndexType>
__global__
void updateAggregateSizes( IndexType *sizesSource, IndexType *sizes, IndexType *aggregates, IndexType numRows )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < numRows )
{
IndexType agg = aggregates[tid];
IndexType aggregateSize = sizes[agg];
IndexType mySize = sizesSource[tid];
while ( mySize > aggregateSize )
{
aggregateSize = atomicCAS( &sizes[agg], aggregateSize, mySize );
}
tid += gridDim.x * blockDim.x;
}
}
// Kernel that checks whether perfect matches exist: nodes that mutually selected each other as strongest neighbour are merged into one aggregate
template <typename IndexType>
__global__
void matchEdges(const IndexType num_rows, IndexType *aggregates, int *strongest_neighbour, IndexType *sizes)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int potential_match, potential_match_neighbour;
while (tid < num_rows)
{
if (aggregates[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
potential_match_neighbour = strongest_neighbour[potential_match];
if ( potential_match == tid )
{
aggregates[tid] = tid;
}
else if (potential_match != -1 && potential_match_neighbour == tid && tid < potential_match) // we have a match
{
aggregates[tid] = tid;
aggregates[potential_match] = tid;
if ( sizes != NULL)
{
sizes[tid] += sizes[potential_match];
}
}
}
tid += gridDim.x * blockDim.x;
}
}
template <typename IndexType, int block_size>
__global__
void countAggregates(const IndexType num_rows, IndexType *aggregates, int *num_unaggregated)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int c = 0;
int i = tid;
while ( i < num_rows )
{
c += ( aggregates[i] == -1 );
i += gridDim.x * blockDim.x;
}
__shared__ volatile int smem[block_size];
smem[threadIdx.x] = c;
__syncthreads();
for ( int off = blockDim.x / 2; off >= 32; off = off / 2 )
{
if ( threadIdx.x < off )
{
smem[threadIdx.x] += smem[threadIdx.x + off];
}
__syncthreads();
}
// warp reduce
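// note: this final stage relies on the shared-memory array being declared volatile for
// intra-warp visibility (pre-Volta style reduction); on newer architectures one would
// typically add __syncwarp() between the steps.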
if ( threadIdx.x < 32 )
{
smem[threadIdx.x] += smem[threadIdx.x + 16];
smem[threadIdx.x] += smem[threadIdx.x + 8];
smem[threadIdx.x] += smem[threadIdx.x + 4];
smem[threadIdx.x] += smem[threadIdx.x + 2];
smem[threadIdx.x] += smem[threadIdx.x + 1];
}
if ( threadIdx.x == 0 )
{
atomicAdd(num_unaggregated, smem[0]);
}
}
template <typename IndexType>
__global__
void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, IndexType *aggregates_candidate)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while (tid < num_rows)
{
if (aggregates[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row
{
aggregates[tid] = aggregates_candidate[tid];
}
tid += gridDim.x * blockDim.x;
}
}
// Kernel that merges unaggregated vertices with their strongest aggregated neighbour
// Weights are read from edge_weights array
// For block_dia_csr_matrix format
template <typename IndexType, typename MatrixValueType>
__global__
void mergeWithExistingAggregatesBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const MatrixValueType *edge_weights,
const int num_block_rows, IndexType *aggregates, int bsize, const int deterministic, IndexType *aggregates_candidate)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int jcol;
MatrixValueType weight;
while (tid < num_block_rows)
{
MatrixValueType max_weight_aggregated = 0.;
int strongest_aggregated = -1;
if (aggregates[tid] == -1) // Unaggregated row
{
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
// Compute edge weight
weight = edge_weights[j];
jcol = column_indices[j];
if (jcol == tid || jcol >= num_block_rows) { continue; } // skip diagonal
if ( aggregates[jcol] == num_block_rows ) { continue; } // skip dd rows
// Identify strongest aggregated neighbour
if (aggregates[jcol] != -1 && weight > 0 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && random_weight3( tid, jcol ) > random_weight3( tid, strongest_aggregated )))) //
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
if (strongest_aggregated != -1) // Found a neighbour to aggregate to
{
if (deterministic == 1)
{
aggregates_candidate[tid] = aggregates[strongest_aggregated];
}
else
{
// Put in same aggregate as strongest neighbour
aggregates[tid] = aggregates[strongest_aggregated];
}
}
else // All neighbours are unaggregated, leave alone
{
if (deterministic == 1)
{
aggregates_candidate[tid] = tid;
}
else
{
aggregates[tid] = tid;
}
}
}
tid += gridDim.x * blockDim.x;
}
}
// Kernel to extract diagonal for csr_matrix format
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernel(const IndexType *offsets, const IndexType *column_indices,
const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
int tIdx = threadIdx.x + blockDim.x * blockIdx.x;
while (tIdx < numRows)
{
const int offset = offsets[tIdx];
const int numj = offsets[tIdx + 1] - offset;
for (int j = offset; j < offset + numj; j++)
{
int jcol = column_indices[j];
if (tIdx == jcol)
{
diagonal[tIdx] = values[j];
}
}
tIdx += gridDim.x * blockDim.x;
}
}
// Kernel to extract diagonal for csr_matrix format
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernelNoDiaProp(const IndexType *dia_idx, const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
int tIdx = threadIdx.x + blockDim.x * blockIdx.x;
while (tIdx < numRows)
{
diagonal[tIdx] = values[dia_idx[tIdx]];
tIdx += gridDim.x * blockDim.x;
}
}
// filter edge weights like this:
// set w_ij = 0 iff
// w_ij < alpha * sqrt( max_k{w_ik} * max_l{w_jl} )
// alpha is some constant, 0.25 or 0.5 should work fine
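// For example, with alpha = 0.25: if max_k{w_ik} = 4 and max_l{w_jl} = 1, the threshold is
// 0.25 * sqrt(4 * 1) = 0.5, so an edge with w_ij = 0.3 is dropped while w_ij = 0.6 is kept.
// (The kernel below compares the squared inequality to avoid the sqrt.)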
template<typename IndexType, typename ValueType>
__global__
void filterWeights( const IndexType *row_offsets, const IndexType *row_indices, const IndexType *col_indices, const IndexType *diag, const ValueType *old_weights, ValueType *new_weights, IndexType num_nonzero_blocks, IndexType num_owned, ValueType alpha )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int i, j, kmin, kmax;
ValueType max_ik, max_jl;
while ( tid < num_nonzero_blocks )
{
i = row_indices[tid];
j = col_indices[tid];
if ( i != j && j < num_owned )
{
//find max_k{w_ik}
kmin = row_offsets[i];
kmax = row_offsets[i + 1];
max_ik = 0.0;
for (int k = kmin; k < kmax; k++)
{
if ( col_indices[k] != i && old_weights[k] > max_ik )
{
max_ik = old_weights[k];
}
}
//find max_l{w_jl}
kmin = row_offsets[j];
kmax = row_offsets[j + 1];
max_jl = 0.0;
for (int l = kmin; l < kmax; l++)
{
if ( col_indices[l] != j && old_weights[l] > max_jl )
{
max_jl = old_weights[l];
}
}
//test squared inequality
if ( old_weights[tid] * old_weights[tid] < alpha * alpha * max_ik * max_jl )
{
new_weights[tid] = 0.0;
}
else //rescale to relative importance. this should also increase the chance of a handshake
{
new_weights[tid] = old_weights[tid];
}
// new_weights[tid] = old_weights[tid] / sqrt(max_ik*max_jl);
}
tid += gridDim.x * blockDim.x;
}
}
template<typename IndexType, typename ValueType>
__global__
void gatherValuesInterleaved( const ValueType *inValues, ValueType *outValues, IndexType nnz, int sq_blocksize, int index_offset )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < nnz )
{
//at least the write is coalesced
outValues[tid] = inValues[tid * sq_blocksize + index_offset];
tid += gridDim.x * blockDim.x;
}
}
template<typename IndexType, typename ValueTypeV, typename ValueTypeM>
__global__
void addToWeights( ValueTypeM *edge_weights, const ValueTypeV *x, const IndexType *row_indices, IndexType *col_indices, IndexType nnz, double scale )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < nnz )
{
int i = row_indices[tid];
int j = col_indices[tid];
edge_weights[tid] -= static_cast<ValueTypeM>( scale * fabs( x[i] - x[j] ) );
tid += gridDim.x * blockDim.x;
}
}
template <typename ValueType, typename IndexType>
__global__
void rescaleVector( ValueType *x, IndexType numRows )
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < numRows )
{
x[tid] = 2 * x[tid] - 1;
tid += gridDim.x * blockDim.x;
}
}
// -----------------
// Methods
// ----------------
// Constructor
template<class T_Config>
MultiPairwiseSelectorBase<T_Config>::MultiPairwiseSelectorBase(AMG_Config &cfg, const std::string &cfg_scope)
{
deterministic = cfg.AMG_Config::template getParameter<IndexType>("determinism_flag", "default");
max_iterations = cfg.AMG_Config::template getParameter<IndexType>("max_matching_iterations", cfg_scope);
numUnassigned_tol = cfg.AMG_Config::template getParameter<double>("max_unassigned_percentage", cfg_scope);
two_phase = cfg.AMG_Config::template getParameter<int>("handshaking_phases", cfg_scope) == 2;
m_aggregation_edge_weight_component = cfg.AMG_Config::template getParameter<int>("aggregation_edge_weight_component", cfg_scope);
aggregation_passes = cfg.AMG_Config::template getParameter<int>("aggregation_passes", cfg_scope); //default to size 8 aggregates. maybe it's more convenient to have that as a config parameter
filter_weights = cfg.AMG_Config::template getParameter<int>("filter_weights", cfg_scope); //by default: no filtering
filter_weights_alpha = cfg.AMG_Config::template getParameter<double>( "filter_weights_alpha", cfg_scope ); //default to 0.25
full_ghost_level = cfg.AMG_Config::template getParameter<int>( "full_ghost_level", cfg_scope ); //defaults to 0
notay_weights = cfg.AMG_Config::template getParameter<int>( "notay_weights", cfg_scope ); //defaults to 0
ghost_offdiag_limit = cfg.AMG_Config::template getParameter<int>( "ghost_offdiag_limit", cfg_scope ); //defaults to 0
merge_singletons = cfg.AMG_Config::template getParameter<int>( "merge_singletons", cfg_scope ); //defaults to 1
weight_formula = cfg.AMG_Config::template getParameter<int>( "weight_formula", cfg_scope ); //weight formula defaults to 0
serial_matching = cfg.AMG_Config::template getParameter<int>( "serial_matching", cfg_scope ) != 0; //will use a serial matching algorithm instead of handshake
modified_handshake = cfg.AMG_Config::template getParameter<int>("modified_handshake", cfg_scope ) == 1;
//passes = 1 -> max = 3
//passes = 2 -> max = 5
//passes = 3 -> max = 10
//passes = 4 -> max = 18
max_aggregate_size = 2;
for (int i = 1; i < aggregation_passes; i ++)
{
max_aggregate_size *= 2;
}
max_aggregate_size += aggregation_passes - (aggregation_passes / 2);
mCfg = cfg;
mCfg_scope = cfg_scope;
}
// setAggregates for block_dia_csr_matrix_h format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(Matrix_h &A,
typename Matrix_h::IVector &aggregates, typename Matrix_h::IVector &aggregates_global, int &num_aggregates, MVector &edge_weights, IVector &sizes)
{
FatalError("MultiPairwise selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
// device specialization
//edge_weights is an in/out parameter:
//if its size is zero, the edge_weights will be computed from A and stored into edge_weights
//else the edge_weights will not be computed and assumed to be valid for the given A. the value array of A is not used in this case
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(Matrix_d &A,
typename Matrix_d::IVector &aggregates,
typename Matrix_d::IVector &aggregates_global,
int &num_aggregates,
MVector &edge_weights,
IVector &sizes)
{
IndexType num_block_rows = (int) A.get_num_rows();
IndexType num_nonzero_blocks = (int) A.get_num_nz();
// both ways are supported
IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all();
typename Matrix_d::IVector &row_indices = A.row_indices;
row_indices.resize( total_nz);
cusp::detail::offsets_to_indices(A.row_offsets, row_indices);
IndexType total_rows = (A.is_matrix_singleGPU()) ? A.get_num_rows() : A.manager->num_rows_all();
aggregates.resize(total_rows);
thrust::fill(aggregates.begin(), aggregates.end(), -1);
cudaCheckError();
if ( this->merge_singletons == 2 && sizes.size() == 0 )
{
sizes.resize( total_rows, 1 ); //init with all ones
}
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_row_indices_ptr = row_indices.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
const ValueType *A_nonzero_values_ptr = A.values.raw();
typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1);
typename Matrix_d::IVector strongest_neighbour_1phase(num_block_rows, -1);
Vector<TemplateConfig<AMGX_device, AMGX_vecUInt, t_matPrec, t_indPrec> > aggregated(num_block_rows, 0);
IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw();
IndexType *aggregates_ptr = aggregates.raw();
const int threads_per_block = 256;
const int num_blocks = std::min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1 );
int numUnassigned = num_block_rows;
int numUnassigned_previous = numUnassigned;
bool computeWeights = ( edge_weights.size() == 0 );
if (computeWeights)
{
if ( A.hasProps( DIAG ) )
{
edge_weights.resize( num_nonzero_blocks + num_block_rows, 0.0 );
}
else
{
edge_weights.resize( num_nonzero_blocks + 1, -1 ); //+1 is important to some algorithms
}
}
ValueType *edge_weights_ptr = edge_weights.raw();
ValueType *rand_edge_weights_ptr = NULL;
cudaStream_t str = thrust::global_thread_handle::get_stream();
// Compute the edge weights
if ( computeWeights )
{
const int num_blocks_V2 = std::min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
//compute with std formula
cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, ValueType>, cudaFuncCachePreferL1);
computeEdgeWeightsBlockDiaCsr_V2 <<< num_blocks_V2, threads_per_block, 0, str>>>(A_row_offsets_ptr,
A_row_indices_ptr,
A_column_indices_ptr,
A_dia_idx_ptr,
A_nonzero_values_ptr,
num_nonzero_blocks,
edge_weights_ptr,
rand_edge_weights_ptr,
num_block_rows,
A.get_block_dimy(),
this->m_aggregation_edge_weight_component,
this->weight_formula);
cudaCheckError();
}
//filter weights if desired
if ( this->filter_weights == 1 )
{
MVector tmp( edge_weights.size() );
const int num_blocks_filter = std::min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
cudaStreamSynchronize(str);
cudaCheckError();
filterWeights <<< num_blocks_filter, threads_per_block, 0, str>>>( A_row_offsets_ptr,
A_row_indices_ptr,
A_column_indices_ptr,
A_dia_idx_ptr,
edge_weights_ptr,
tmp.raw(),
num_nonzero_blocks,
num_block_rows,
this->filter_weights_alpha);
cudaStreamSynchronize(str);
cudaCheckError();
tmp.swap( edge_weights );
edge_weights_ptr = edge_weights.raw();
}
// compute matching
if ( !this->serial_matching )
{
IVector degree;
if ( this->modified_handshake )
{
degree.resize( num_block_rows );
}
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
// TODO: allocate host pinned memory
AsyncEvent *throttle_event = new AsyncEvent;
throttle_event->create();
typename Matrix_h::IVector h_unagg_vec(1);
typename Matrix_d::IVector d_unagg_vec(1);
int *unaggregated = h_unagg_vec.raw();
int *d_unaggregated = d_unagg_vec.raw();
#endif
int icount, s = 1;
{
icount = 0;
ValueType *weights_ptr = edge_weights_ptr;
do
{
if ( !this->two_phase )
{
if ( this->modified_handshake )
computeDegree <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr,
A_column_indices_ptr,
weights_ptr,
aggregates_ptr,
sizes.raw(),
degree.raw(),
num_block_rows,
this->max_aggregate_size );
// 1-phase handshaking
if ( this->modified_handshake )
findStrongestNeighbourBlockDiaCsr_V3<IndexType, ValueType, true>
<<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr,
A_column_indices_ptr,
weights_ptr,
num_block_rows,
aggregates_ptr,
strongest_neighbour_ptr,
sizes.raw(),
degree.raw(),
A.get_block_dimy(),
this->max_aggregate_size,
this->merge_singletons);
else
findStrongestNeighbourBlockDiaCsr_V3<IndexType, ValueType, false>
<<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr,
A_column_indices_ptr,
weights_ptr,
num_block_rows,
aggregates_ptr,
strongest_neighbour_ptr,
sizes.raw(),
degree.raw(),
A.get_block_dimy(),
this->max_aggregate_size,
this->merge_singletons);
cudaCheckError();
}
else
{
// 2-phase handshaking
findStrongestNeighbourBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 1, this->merge_singletons);
cudaCheckError();
// 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase
findStrongestNeighbourBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 2, this->merge_singletons);
cudaCheckError();
}
// Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
if ( this->merge_singletons == 2 )
{
matchEdges <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, strongest_neighbour_ptr, sizes.raw());
}
else
{
matchEdges <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, strongest_neighbour_ptr, (int *)NULL);
}
cudaCheckError();
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
s = (icount & 1);
if ( s == 0 )
{
// count unaggregated vertices
cudaMemsetAsync(d_unaggregated, 0, sizeof(int), str);
countAggregates<IndexType, threads_per_block> <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, d_unaggregated);
cudaCheckError();
cudaMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), cudaMemcpyDeviceToHost, str);
throttle_event->record(str);
}
else
{
throttle_event->sync();
numUnassigned_previous = numUnassigned;
numUnassigned = *unaggregated;
}
#else
cudaStreamSynchronize(str);
numUnassigned_previous = numUnassigned;
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
#endif
icount++;
}
while ( (s == 0) || !(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous));
}
// printf("%i,\n", icount);
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
delete throttle_event;
#endif
}
else
{
computeMatchingSerialGreedy( A, aggregates, num_aggregates, edge_weights );
}
if ( this->merge_singletons == 1 )
{
// Merge remaining vertices with current aggregates
if (this->deterministic != 1)
{
while (numUnassigned != 0)
{
mergeWithExistingAggregatesBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, (IndexType *) NULL);
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
}
}
else
{
typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1);
while (numUnassigned != 0)
{
mergeWithExistingAggregatesBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, aggregates_candidate.raw());
cudaCheckError();
joinExistingAggregates <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, aggregates_candidate.raw());
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
}
aggregates_candidate.resize(0);
}
}
else if (this->merge_singletons == 0 )
{
//make singletons
aggregateSingletons <<< num_blocks, threads_per_block, 0, str>>>( aggregates_ptr, num_block_rows );
cudaCheckError();
}
else if ( this->merge_singletons == 2 )
{
//merges all remaining singletons into adequate neighbors if possible
mergeSingletonsSmart <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr,
A_column_indices_ptr,
edge_weights_ptr,
aggregates_ptr,
sizes.raw(),
num_block_rows,
this->max_aggregate_size);
cudaCheckError();
}
//This will assign num_aggregates to the pseudo aggregate without counting it. Perfect!
this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates);
if ( this->merge_singletons == 2 )
{
//update the sizes vector, so it matches the renumbered aggregates size
IVector sizesSource;
sizesSource.swap( sizes );
sizes.resize( num_aggregates, 1 );
updateAggregateSizes <<< num_blocks, threads_per_block, 0, str>>>( sizesSource.raw(), sizes.raw(), aggregates_ptr, num_block_rows );
cudaCheckError();
}
}
//instead of a handshake, we use a serial greedy algorithm to compute a better matching
//the algorithm:
// 1. compute degree of every node and sort nodes by degree into double linked list
// 2. while non-isolated nodes left:
// take node with minimum degree > 0
// find strongest edge to unaggregated node and assign to new aggregate
// remove both nodes from linked list
// decrease degree of each neighbor by one for each of the two nodes
// update list
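// Layout of the fwd/bwd arrays below: entries 0..numRows-1 are the nodes themselves,
// entry numRows + d is the sentinel (bucket head) of the circular doubly linked list that
// holds all unaggregated nodes of current degree d. Taking the node right after a sentinel
// therefore yields a node of exactly that degree in O(1).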
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeMatchingSerialGreedy( const Matrix_d &A, IVector &aggregates, int &numAggregates, MVector &edge_weights)
{
IndexType numRows = A.row_offsets.size() - 1;
IndexType nnz = A.col_indices.size();
//allocate memory on host
IndexType *ia = new IndexType[numRows + 1];
IndexType *ja = new IndexType[nnz];
ValueType *w = new ValueType[nnz];
IndexType *agg = new IndexType[numRows];
IndexType *deg = new IndexType[numRows];
//copy
cudaMemcpy( ia, A.row_offsets.raw(), sizeof(IndexType) * (numRows + 1), cudaMemcpyDeviceToHost );
cudaMemcpy( ja, A.col_indices.raw(), sizeof(IndexType)*nnz, cudaMemcpyDeviceToHost );
cudaMemcpy( w, edge_weights.raw(), sizeof(ValueType)*nnz, cudaMemcpyDeviceToHost );
//init agg and compute the degree of each aggregate
int max_degree = 0;
for (IndexType i = 0; i < numRows; i++)
{
agg[i] = -1;
int degree = 0;
for (IndexType ii = ia[i]; ii < ia[i + 1]; ii++) //only care for positive weights
{
if ( ja[ii] != i && w[ii] > 0.0 )
{
degree++;
}
}
if ( degree > max_degree )
{
max_degree = degree;
}
deg[i] = degree;
}
if ( max_degree >= numRows )
{
FatalError( "max degree is greater than numRows.", AMGX_ERR_UNKNOWN );
}
//init double linked list
IndexType *fwd = new IndexType[numRows + max_degree + 1];
IndexType *bwd = new IndexType[numRows + max_degree + 1];
for (IndexType i = 0; i < numRows + max_degree + 1; i++)
{
fwd[i] = i;
bwd[i] = i;
}
IndexType nodesLeft = numRows;
numAggregates = 0;
//insert nodes into list
for (IndexType i = numRows - 1; i >= 0; i--) //inserting in backward order the nodes will be sorted by index in case of same degree
{
//insert forward following root
fwd[i] = fwd[numRows + deg[i]];
fwd[numRows + deg[i]] = i;
//insert backward
bwd[i] = numRows + deg[i];
bwd[fwd[i]] = i;
//isolated nodes cannot be aggregated
if ( deg[i] == 0 )
{
nodesLeft--;
}
}
while ( nodesLeft > 0 )
{
IndexType node = numRows;
int degree;
for (degree = 1; degree <= max_degree; degree++)
{
//list not empty -> select node
if ( fwd[numRows + degree] < numRows ) //selecting the first node will select the most recently inserted one or the one with lowest index; either is preferable
{
node = fwd[numRows + degree];
}
if ( node < numRows )
{
break;
}
}
//no node with degree >= 1 found even though nodesLeft > 0
if ( node == numRows )
{
FatalError("nodeLeft counting or list invalid", AMGX_ERR_UNKNOWN );
}
if ( agg[node] != -1 )
{
FatalError("node is already aggregated", AMGX_ERR_UNKNOWN );
}
//find strongest edge
ValueType max_weight = 0.0;
IndexType max_node = numRows; //use this as gatekeeper, so if weight == 0 the node index will not be greater than this
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
{
IndexType j = ja[ii];
if ( agg[j] != -1 || j == node)
{
continue;
}
if ( w[ii] > 0.0 )
{
degree--;
}
//deterministic, doesn't select 0-weight edges.
if ( w[ii] > max_weight || (w[ii] == max_weight && j > max_node) ) //always taking the edge pointing to the max node can give good alignment if numbering is structured
{
max_node = j;
max_weight = w[ii];
}
} //Note that there has to be at least one neighbor node because degree of node is at least 1.
if ( max_node == numRows )
{
FatalError( "node has no neighbor although degree of node is at least 1", AMGX_ERR_UNKNOWN );
}
if ( degree != 0 )
{
FatalError( "node degree corrupted", AMGX_ERR_UNKNOWN );
}
//aggregate
agg[node] = node;
agg[max_node] = node;
numAggregates++;
nodesLeft -= 2;
//remove from list
fwd[bwd[node]] = fwd[node];
bwd[fwd[node]] = bwd[node];
fwd[bwd[max_node]] = fwd[max_node];
bwd[fwd[max_node]] = bwd[max_node];
//update neighbors and list
//max_node first
for (IndexType ii = ia[max_node]; ii < ia[max_node + 1]; ii++)
{
IndexType j = ja[ii];
if ( agg[j] != -1 || w[ii] <= 0.0)
{
continue;
}
//remove j from list
fwd[bwd[j]] = fwd[j];
bwd[fwd[j]] = bwd[j];
//update degree of j
deg[j]--;
//add j back to start of the list
fwd[j] = fwd[numRows + deg[j]];
bwd[j] = numRows + deg[j];
bwd[fwd[j]] = j;
fwd[bwd[j]] = j;
if (deg[j] == 0)
{
nodesLeft--;
}
}
//node second, this will prefer node's neighbors over max_node's neighbors when choosing the next node
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
{
IndexType j = ja[ii];
if ( agg[j] != -1 || w[ii] <= 0.0)
{
continue;
}
//remove j from list
fwd[bwd[j]] = fwd[j];
bwd[fwd[j]] = bwd[j];
//update degree of j
deg[j]--;
//add j back to start of the list
fwd[j] = fwd[numRows + deg[j]];
bwd[j] = numRows + deg[j];
bwd[fwd[j]] = j;
fwd[bwd[j]] = j;
if (deg[j] == 0)
{
nodesLeft--;
}
}
}
//copy result back to device
cudaMemcpy( aggregates.raw(), agg, sizeof(IndexType)*numRows, cudaMemcpyHostToDevice );
//assert matching
for (IndexType node = 0; node < numRows; node++)
{
if ( agg[node] == -1 )
{
continue;
}
for ( IndexType partner = 0; partner < numRows; partner++)
{
if ( agg[partner] == agg[node] )
{
if ( partner == node )
{
continue;
}
bool neighbor = false;
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
if ( ja[ii] == partner )
{
neighbor = true;
break;
}
if ( !neighbor )
{
for (IndexType ii = ia[partner]; ii < ia[partner + 1]; ii++)
if ( ja[ii] == node )
{
neighbor = true;
break;
}
}
if ( !neighbor )
{
FatalError("Internal error in aggregation selector", AMGX_ERR_INTERNAL);
}
}
}
}
//you shall not leak memory
delete[] ia;
delete[] ja;
delete[] w;
delete[] agg;
delete[] deg;
delete[] fwd;
delete[] bwd;
}
//this kernel merges aggregate2 into aggregate1
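// In other words, it composes two mappings: aggregate1 maps fine rows to aggregate ids of
// the previous pass, aggregate2 maps those ids to the new coarse ids. Entries equal to
// sizeAggregate2 denote the pseudo aggregate and are redirected to sizeAggregate3, the
// pseudo aggregate id after renumbering.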
template<typename IndexType>
__global__
void mergeAggregates(IndexType *aggregate1, const IndexType *aggregate2, IndexType sizeAggregate1, IndexType sizeAggregate2, IndexType sizeAggregate3)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
while ( tid < sizeAggregate1 )
{
if ( aggregate1[tid] == sizeAggregate2 )
{
aggregate1[tid] = sizeAggregate3;
}
else
{
aggregate1[tid] = aggregate2[aggregate1[tid]];
}
tid += gridDim.x * blockDim.x;
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeIncompleteGalerkin( const Matrix_h &A,
Matrix_h &Ac,
const typename Matrix_h::IVector &aggregates,
const typename Matrix_h::IVector &R_row_offsets,
const typename Matrix_h::IVector &R_column_indices,
const int num_aggregates )
{
FatalError("computeIncomlpetegalerkin is not supported on host. Run with ghost_offdiag_limit=0 instead.", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeIncompleteGalerkin( const Matrix_d &A,
Matrix_d &Ac,
const typename Matrix_d::IVector &aggregates,
const typename Matrix_d::IVector &R_row_offsets,
const typename Matrix_d::IVector &R_column_indices,
const int num_aggregates )
{
FatalError("computeIncomlpetegalerkin is not implemented yet. run with ghost_offdiag_limit=0 instead.", AMGX_ERR_NOT_IMPLEMENTED);
}
template<class TConfig>
void MultiPairwiseSelectorBase<TConfig>::assertRestriction( const IVector &R_row_offsets, const IVector &R_col_indices, const IVector &aggregates )
{
int *r_ia = new int[R_row_offsets.size()];
int *r_ja = new int[R_col_indices.size()];
int *agg = new int[aggregates.size()];
int *used_col = new int[aggregates.size()];
for ( int i = 0; i < aggregates.size(); i++ )
{
used_col[i] = 0;
}
cudaMemcpy( r_ia, R_row_offsets.raw(), sizeof(int)*R_row_offsets.size(), cudaMemcpyDeviceToHost );
cudaMemcpy( r_ja, R_col_indices.raw(), sizeof(int)*R_col_indices.size(), cudaMemcpyDeviceToHost );
cudaMemcpy( agg, aggregates.raw(), sizeof(int)*aggregates.size(), cudaMemcpyDeviceToHost );
for ( int i = 0; i < R_row_offsets.size() - 1; i++ )
{
for ( int ii = r_ia[i]; ii < r_ia[i + 1]; ii++ )
{
int j = r_ja[ii];
used_col[j]++;
if ( used_col[j] > 1 )
{
std::cout << "column " << j << " is present at least " << used_col[j] << " times" << std::endl;
}
if ( j < 0 || j >= aggregates.size() )
{
std::cout << "Error: j out of bounds, j = " << j << " and numRows = " << aggregates.size() << std::endl;
}
else if ( agg[j] != i )
{
std::cout << "Error: agg[" << j << "] = " << agg[j] << " != " << i << std::endl;
}
}
}
std::cout << "assert restriction done" << std::endl;
}
template<class T_Config>
void MultiPairwiseSelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A,
IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
if (A.get_block_dimx() == A.get_block_dimy())
{
//ghost level matrix; its values serve as the edge weights for the next pass
Matrix<TConfig> ghostA;
ghostA.values.resize(0);
//prolongation and restriction operator. this is only needed when LowDegCoarseAGenerator is used
IVector R_row_offsets;
IVector R_col_indices;
//holds the size of each aggregate
IVector sizes;
sizes.resize(0);
//aggregates for ghost level
IVector aggregates_current;
IVector aggregates_global_current;
bool aggregates_initialized = true;
if (aggregates.size() == 0)
{
aggregates_initialized = false;
if (!A.is_matrix_singleGPU())
{
aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors()));
}
else
{
aggregates.resize(A.get_num_rows());
}
}
//for mergeAggregates kernel
const int threads_per_block = 256;
const int num_blocks = std::min( AMGX_GRID_MAX_SIZE, (A.get_num_rows() - 1) / threads_per_block + 1 );
cudaStream_t stream = thrust::global_thread_handle::get_stream();
//initialize and prepare weight matrix
Matrix<TConfig> w;
w.set_initialized(0);
w.addProps(CSR);
w.delProps(COO);
w.setColsReorderedByColor(false);
w.resize( 0, 0, 0, 1, 1, true ); //empty scalar 0x0 matrix
w.values.resize(0); //matrix resize sets the values array to nnz+1 for no apparent reason
IndexType targetSize = 1;
//initialize coarse A generator
CoarseAGenerator<TConfig> *cag;
const bool use_restriction = true;
const bool shrink_ghost_level = false;
cag = new LowDegCoarseAGenerator<TConfig>(mCfg, mCfg_scope);
// This will make coarseAGenerator to allocate more memory inside of galerkin
ghostA.manager = new DistributedManager<TConfig>();
w.manager = new DistributedManager<TConfig>();
Matrix<TConfig> *curA = &A;
//foreach pass do:
// 1. build aggregates and weights
// 2. create weight matrix (in full_ghost_level mode this is the input matrix or the last ghostlevel matrix)
// 3. if in full ghost level mode, build R
// 4. compute next level
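// Each pass performs one pairwise matching, so aggregate sizes roughly double per pass;
// after aggregation_passes passes the aggregates reach the max_aggregate_size computed in
// the constructor (about 2^passes plus a small slack for merged singletons).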
for (int current_pass = 1; true; current_pass++)
{
const IndexType numRows = curA->get_num_rows();
const IndexType nnz = curA->get_num_nz();
targetSize *= 2;
if ( full_ghost_level )
{
w.values.resize(0); //compute weights from curA
}
else
{
w.values.swap( ghostA.values ); //use the weights computed with the galerkin operator (this will do nothing in the first pass, both values have size 0)
}
// create aggregates from correct input matrix
setAggregates_common_sqblocks( *curA, aggregates_current, aggregates_global_current, num_aggregates, w.values, sizes );
if ( current_pass > 1 )
{
//merge original aggregate with the newly created ones
mergeAggregates <<< num_blocks, threads_per_block, 0, stream >>>( aggregates.raw(), aggregates_current.raw(), A.get_num_rows(), numRows, num_aggregates );
cudaCheckError();
//mergeAggregates<<< num_blocks, threads_per_block, 0, stream >>>( aggregates_global.raw(), aggregates_global_current.raw(), A.get_num_rows() );
//cudaCheckError();
}
//try to free memory
if ( full_ghost_level )
{
//then we don't need to keep the weights; they are only saved for the original level to do post processing
w.values.resize(0);
}
else
{
//save edge weights for original level later
//in that case we can throw away the values of ghostA as we will use the values to compute the next ghost level
ghostA.values.resize(0);
}
// this is the break condition for the loop
if ( current_pass >= aggregation_passes || num_aggregates <= 1 || num_aggregates == numRows)
{
//aggregates has not been initialized yet, so hand over the results directly
if ( !aggregates_initialized )
{
aggregates.swap( aggregates_current );
aggregates_global.swap( aggregates_global_current );
}
cudaStreamSynchronize( stream );
cudaCheckError();
break;
}
//prepare A to be corrupted
curA->set_initialized(0);
//swap in ia, ja from curA
w.row_offsets.swap( curA->row_offsets );
w.col_indices.swap( curA->col_indices );
if ( full_ghost_level )
{
if ( shrink_ghost_level && curA->get_block_dimx() > 1)
{
//set w to correct size
w.values.resize( nnz );
//define grid and offsets
const int num_blocks_inter = std::min( (int)AMGX_GRID_MAX_SIZE, (int)(nnz - 1) / threads_per_block + 1 );
const int sq_blocksize = A.get_block_dimx() * A.get_block_dimy();
const int index_offset = A.get_block_dimy() * m_aggregation_edge_weight_component + m_aggregation_edge_weight_component;
//do the interleaved copy
gatherValuesInterleaved <<< num_blocks_inter, threads_per_block, 0, stream>>>( A.values.raw(), w.values.raw(), nnz, sq_blocksize, index_offset );
cudaStreamSynchronize( stream );
cudaCheckError();
}
else
{
w.values.swap( curA->values );
}
}
w.diag.swap( curA->diag );
//resize to inform the matrix of its new size
if ( full_ghost_level && !shrink_ghost_level )
{
w.set_block_dimx( A.get_block_dimx() );
w.set_block_dimy( A.get_block_dimy() );
}
else
{
w.set_block_dimx( 1 );
w.set_block_dimy( 1 );
}
w.set_num_rows( numRows );
w.set_num_cols( numRows );
w.set_num_nz( nnz );
w.set_allow_recompute_diag( false );
if ( curA->hasProps( DIAG ) )
{
w.addProps( DIAG );
}
//ready to use
w.set_initialized(1);
//compute restriction operator
if ( use_restriction )
{
IVector R_row_indices(aggregates_current);
R_row_offsets.resize(num_aggregates + 2);
R_col_indices.resize(numRows);
thrust::sequence(R_col_indices.begin(), R_col_indices.end());
cudaCheckError();
thrust::sort_by_key(R_row_indices.begin(), R_row_indices.end(), R_col_indices.begin());
cudaCheckError();
cusp::detail::indices_to_offsets(R_row_indices, R_row_offsets);
cudaCheckError();
//delete last row, which holds the pseudo aggregate
R_row_offsets.resize( num_aggregates + 1);
R_col_indices.resize( R_row_offsets[num_aggregates] );
}
// 3. compute galerkin ghost level
if ( ghost_offdiag_limit == 0 )
{
//compute full galerkin
cag->computeAOperator(w,
ghostA,
aggregates_current,
R_row_offsets,
R_col_indices,
num_aggregates);
}
else
{
//compute incomplete galerkin
computeIncompleteGalerkin(w,
ghostA,
aggregates_current,
R_row_offsets,
R_col_indices,
num_aggregates);
}
//from here on, w is dismantled again (its arrays are swapped back out)
w.set_initialized(0);
//repair the original A matrix. its ia and ja are in w
if ( current_pass == 1 )
{
//swap back
w.row_offsets.swap( A.row_offsets );
w.col_indices.swap( A.col_indices );
//only in that case we have swapped the values
if ( full_ghost_level && !shrink_ghost_level )
{
w.values.swap( A.values );
}
//save the edge weights of the original level
A.diag.swap( w.diag );
A.set_initialized(1); //A is repaired now
//save the first aggregates into the original aggregate vector so we can merge them later
aggregates.swap( aggregates_current );
aggregates_global.swap( aggregates_global_current );
aggregates_initialized = true;
curA = &ghostA;
}
}
delete cag;
}
else
{
FatalError("Unsupported block size for MultiPairwise", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
}
// -------------------------
// Explicit instantiations
// -------------------------
#define AMGX_CASE_LINE(CASE) template class MultiPairwiseSelectorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class MultiPairwiseSelector<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
}
|
0a7edcad86fe131da23743752f9fa40181511dc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scan.cuh"
#include "segmented_scan.cuh"
#include "segmented_scan_helpers.cuh"
#include <contrib/libs/cub/hipcub/hipcub.hpp>
namespace NKernel
{
template <class T>
__global__ void ZeroSegmentStartsImpl(const ui32* flags, ui32 flagMask, ui32 size, T* output) {
const ui32 tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
bool segmentStart = flags[tid] & flagMask;
if (segmentStart) {
output[tid] = 0;
}
}
}
template <typename T>
hipError_t SegmentedScanCub(const T* input, const ui32* flags, ui32 flagMask,
T* output,
ui32 size, bool inclusive,
TScanKernelContext<T>& context,
TCudaStream stream) {
if (inclusive) {
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
return hipcub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
} else {
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, false>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
hipError_t errorCode = hipcub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
{
ui32 blockSize = 256;
ui32 numBlocks = CeilDivide<ui32>(size, blockSize);
hipLaunchKernelGGL(( ZeroSegmentStartsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, flags, flagMask, size, output);
}
return errorCode;
}
}
template <class T>
ui64 SegmentedScanVectorTempSize(ui32 size, bool inclusive) {
(void)inclusive;
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
ui64 sizeInBytes = 0;
TInput fakeInput((T*)nullptr, (ui32*)nullptr, 0u);
TOutput fakeOutput((T*)nullptr, (T*)nullptr);
hipcub::DeviceScan::InclusiveScan<TInput, TOutput, TSegmentedSum>(nullptr, sizeInBytes, fakeInput, fakeOutput, TSegmentedSum(), size);
return sizeInBytes;
}
#define SEGMENTED_SCAN_CUB(Type)\
template hipError_t SegmentedScanCub<Type>(const Type* input, const ui32* flags, ui32 mask, Type* output, ui32 size, bool inclusive,\
TScanKernelContext<Type>& context, TCudaStream stream);
SEGMENTED_SCAN_CUB(float)
SEGMENTED_SCAN_CUB(double)
SEGMENTED_SCAN_CUB(int)
SEGMENTED_SCAN_CUB(ui32)
template ui64 SegmentedScanVectorTempSize<int>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<ui32>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<float>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<double>(ui32, bool);
}
| 0a7edcad86fe131da23743752f9fa40181511dc6.cu | #include "scan.cuh"
#include "segmented_scan.cuh"
#include "segmented_scan_helpers.cuh"
#include <contrib/libs/cub/cub/device/device_scan.cuh>
namespace NKernel
{
template <class T>
__global__ void ZeroSegmentStartsImpl(const ui32* flags, ui32 flagMask, ui32 size, T* output) {
const ui32 tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
bool segmentStart = flags[tid] & flagMask;
if (segmentStart) {
output[tid] = 0;
}
}
}
template <typename T>
cudaError_t SegmentedScanCub(const T* input, const ui32* flags, ui32 flagMask,
T* output,
ui32 size, bool inclusive,
TScanKernelContext<T>& context,
TCudaStream stream) {
if (inclusive) {
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
return cub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
} else {
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, false>;
TInput inputIter(input, flags, flagMask);
TOutput outputIter(output, output + size);
cudaError_t errorCode = cub::DeviceScan::InclusiveScan<TInput, TOutput>(context.PartResults, context.NumParts, inputIter, outputIter, TSegmentedSum(), size, stream);
{
ui32 blockSize = 256;
ui32 numBlocks = CeilDivide<ui32>(size, blockSize);
ZeroSegmentStartsImpl<<<numBlocks, blockSize, 0, stream>>>(flags, flagMask, size, output);
}
return errorCode;
}
}
template <class T>
ui64 SegmentedScanVectorTempSize(ui32 size, bool inclusive) {
(void)inclusive;
using TInput = TSegmentedScanInputIterator<T>;
using TOutput = TSegmentedScanOutputIterator<T, true>;
ui64 sizeInBytes = 0;
TInput fakeInput((T*)nullptr, (ui32*)nullptr, 0u);
TOutput fakeOutput((T*)nullptr, (T*)nullptr);
cub::DeviceScan::InclusiveScan<TInput, TOutput, TSegmentedSum>(nullptr, sizeInBytes, fakeInput, fakeOutput, TSegmentedSum(), size);
return sizeInBytes;
}
#define SEGMENTED_SCAN_CUB(Type)\
template cudaError_t SegmentedScanCub<Type>(const Type* input, const ui32* flags, ui32 mask, Type* output, ui32 size, bool inclusive,\
TScanKernelContext<Type>& context, TCudaStream stream);
SEGMENTED_SCAN_CUB(float)
SEGMENTED_SCAN_CUB(double)
SEGMENTED_SCAN_CUB(int)
SEGMENTED_SCAN_CUB(ui32)
template ui64 SegmentedScanVectorTempSize<int>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<ui32>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<float>(ui32, bool);
template ui64 SegmentedScanVectorTempSize<double>(ui32, bool);
}
|
b53469bd60751005727892197ccdc458f69b2d65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <memory>
#include "tensors/gpu/cuda_helpers.h"
#include "tensors/tensor_operators.h"
#include "training/gradient_dropping/dropper.h"
#include "training/gradient_dropping/sparse_tensor.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/copy.h>
namespace marian {
namespace gpu {
__global__ void sampling(float* originalData,
float* data,
int size,
int scale,
int fullSize) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= size)
return;
data[idx] = abs(originalData[idx * scale]);
}
float GradientDropBase::find_threshold(Tensor grads, float rate) {
hipSetDevice(grads->getDevice().no);
int size = grads->size();
int threads = 512;
int sortSize = min(100000, size);
int blocksSample = 1 + sortSize / threads;
if (!tmp) {
tmp = newTensor(sortSize, grads->getBackend());
}
hipLaunchKernelGGL(( sampling), dim3(blocksSample), dim3(threads), 0, 0,
grads->data(), tmp->data(), sortSize, size / sortSize, size);
thrust::device_ptr<float> dev_data_ptr(tmp->data());
thrust::sort(dev_data_ptr, dev_data_ptr + sortSize);
int cut_index = ::max(0, (int)(sortSize * rate) - 1);
float t;
hipMemcpy(&t, tmp->data() + cut_index, sizeof(float), hipMemcpyDeviceToHost);
return t;
}
void GradientDropBase::dropGraph(Tensor grads,
SparseTensor destination,
float rate,
float momentum) {
// init
if(!residual) {
residual = newTensor(grads->size(), grads->getBackend());
step = 0;
}
if(!velocity && momentum > 0.0) {
velocity = newTensor(grads->size(), grads->getBackend());
}
// Step 1: add residual to the current gradient
{
using namespace functional;
marian::gpu::Element(_1 = _1 + _2, grads, residual);
}
// step 2: find threshold
float t = find_threshold(grads, rate);
// step 3: drop gradients lower than threshold
// store gradients lower than threshold into the residual
{
using namespace functional;
marian::gpu::Element(_1 = if_then_else(abs(_2) > t, 0, _2), residual, grads);
marian::gpu::Element(_1 = if_then_else(abs(_1) <= t, 0, _1), grads);
}
destination->fromDense(grads);
step++;
}
}
}
| b53469bd60751005727892197ccdc458f69b2d65.cu | #include <curand.h>
#include <curand_kernel.h>
#include <memory>
#include "tensors/gpu/cuda_helpers.h"
#include "tensors/tensor_operators.h"
#include "training/gradient_dropping/dropper.h"
#include "training/gradient_dropping/sparse_tensor.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/copy.h>
namespace marian {
namespace gpu {
__global__ void sampling(float* originalData,
float* data,
int size,
int scale,
int fullSize) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= size)
return;
data[idx] = abs(originalData[idx * scale]);
}
float GradientDropBase::find_threshold(Tensor grads, float rate) {
cudaSetDevice(grads->getDevice().no);
int size = grads->size();
int threads = 512;
int sortSize = min(100000, size);
int blocksSample = 1 + sortSize / threads;
if (!tmp) {
tmp = newTensor(sortSize, grads->getBackend());
}
sampling<<<blocksSample, threads>>>(
grads->data(), tmp->data(), sortSize, size / sortSize, size);
thrust::device_ptr<float> dev_data_ptr(tmp->data());
thrust::sort(dev_data_ptr, dev_data_ptr + sortSize);
int cut_index = std::max(0, (int)(sortSize * rate) - 1);
float t;
cudaMemcpy(&t, tmp->data() + cut_index, sizeof(float), cudaMemcpyDeviceToHost);
return t;
}
void GradientDropBase::dropGraph(Tensor grads,
SparseTensor destination,
float rate,
float momentum) {
// init
if(!residual) {
residual = newTensor(grads->size(), grads->getBackend());
step = 0;
}
if(!velocity && momentum > 0.0) {
velocity = newTensor(grads->size(), grads->getBackend());
}
// Step 1: add residual to the current gradient
{
using namespace functional;
marian::gpu::Element(_1 = _1 + _2, grads, residual);
}
// step 2: find threshold
float t = find_threshold(grads, rate);
// step 3: drop gradients lower than threshold
// store gradients lower than threshold into the residual
{
using namespace functional;
marian::gpu::Element(_1 = if_then_else(abs(_2) > t, 0, _2), residual, grads);
marian::gpu::Element(_1 = if_then_else(abs(_1) <= t, 0, _1), grads);
}
destination->fromDense(grads);
step++;
}
}
}
|
b35ed21a5b6e32c14ebf2ce6df45c5e26dcda369.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void calcSigmoidForwardGPU(float *in, float *out, int elements)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float v = in[id];
v = 1.0f / (1.0f + exp( -v )); // sigmoid
out[id] = v;
}
/* original
for ( int i = 0; i < in_total_size; ++i ){
out.data[i] = activator_function(in.data[i]);
}
*/
} | b35ed21a5b6e32c14ebf2ce6df45c5e26dcda369.cu | #include "includes.h"
__global__ void calcSigmoidForwardGPU(float *in, float *out, int elements)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float v = in[id];
v = 1.0f / (1.0f + exp( -v )); // sigmoid
out[id] = v;
}
/* original
for ( int i = 0; i < in_total_size; ++i ){
out.data[i] = activator_function(in.data[i]);
}
*/
} |
0a965403f84e4cd1121360db7ae8248ae746038a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Macros for indexing data arrays.
#define Ex(i,n) Ex_[(i)+Size*(n)]
#define Dx(i,n) Dx_[(i)+Size*(n)]
#define Hy(i,n) Hy_[(i)+Size*(n)]
#define By(i,n) By_[(i)+Size*(n)]
#include <FDTD1DDNG.hpp>
// Dry run kernel.
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_DryRun_M(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
const PRECISION *Ex_, PRECISION *Hy_,
// Incident field.
PRECISION *Exi,
const unsigned int x1,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
unsigned int nf)
{
const unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != Size-1) // Normal update equation.
Hy(i,nf) = Hy(i,n0) + (Ex(i,n0)-Ex(i+1,n0))*dt/(u0*dz);
__syncthreads();
// ABC
if (i == Size-1)
Hy(i,nf) = Hy(i-1,n0) + (Sc-1)/(Sc+1)*(Hy(i-1,nf)-Hy(i,n0));
}
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_DryRun_E(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
PRECISION *Ex_, const PRECISION *Hy_,
// Incident field.
PRECISION *Exi,
const unsigned int x1,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
const unsigned int nf)
{
const unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != 0)
Ex(i,nf) = Ex(i,n0) + (Hy(i-1,nf)-Hy(i,nf))*dt/(e0*dz);
__syncthreads();
// ABC
if (i == 0)
Ex(i,nf) = Ex(i+1,n0) + (Sc-1)/(Sc+1)*(Ex(i+1,nf)-Ex(i,n0));
__syncthreads();
// Source.
if (i == SourceLocation)
{
if (SourceChoice == 1)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + exp(-1.*pow(((PRECISION)n-(PRECISION)td)/((PRECISION)PulseWidth/4.),2)) * Sc;
}
else if (SourceChoice == 2)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + sin(2.*PI*f*(PRECISION)n*dt) * Sc;
}
else if (SourceChoice == 3)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + (1.-2.*pow(PI*fp*((PRECISION)n*dt-dr),2))*exp(-1.*pow(PI*fp*((PRECISION)n*dt-dr),2)) * Sc;
}
}
// Recording incident Field.
if (i == x1)
Exi[n] = Ex(i,nf);
}
// Simulation kernel.
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_Simulation_M(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
PRECISION *Ex_, PRECISION *Dx_, PRECISION *Hy_, PRECISION *By_,
// Drude material parameters.
PRECISION *einf, PRECISION *uinf, PRECISION *wpesq, PRECISION *wpmsq, PRECISION *ge, PRECISION *gm,
// Drude scalars.
PRECISION *ae0, PRECISION *ae, PRECISION *be, PRECISION *ce, PRECISION *de, PRECISION *ee,
PRECISION *am0, PRECISION *am, PRECISION *bm, PRECISION *cm, PRECISION *dm, PRECISION *em,
// Incident field.
PRECISION *Ext,
PRECISION *Extt,
PRECISION *Exz1,
PRECISION *Exz2,
const unsigned int x1,
const unsigned int Z1,
const unsigned int Z2,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
const unsigned int nf)
{
unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != Size-1) // Normal update equation.
{
By(i,nf) = By(i,n0) + (Ex(i,n0)-Ex(i+1,n0))*dt/dz;
Hy(i,nf) = am[i]*(By(i,nf)-2*By(i,n0)+By(i,np)) + bm[i]*(By(i,nf)-By(i,np)) + cm[i]*(2*Hy(i,n0)-Hy(i,np)) + dm[i]*(2*Hy(i,n0)+Hy(i,np)) + em[i]*(Hy(i,np));
}
__syncthreads();
// ABC
if (i == Size-1)
{
Hy(i,nf) = Hy(i-1,n0) + (Sc-1)/(Sc+1)*(Hy(i-1,nf)-Hy(i,n0));
By(i,nf) = u0*Hy(i,nf);
}
}
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_Simulation_E(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
PRECISION *Ex_, PRECISION *Dx_, PRECISION *Hy_, PRECISION *By_,
// Drude material parameters.
PRECISION *einf, PRECISION *uinf, PRECISION *wpesq, PRECISION *wpmsq, PRECISION *ge, PRECISION *gm,
// Drude scalars.
PRECISION *ae0, PRECISION *ae, PRECISION *be, PRECISION *ce, PRECISION *de, PRECISION *ee,
PRECISION *am0, PRECISION *am, PRECISION *bm, PRECISION *cm, PRECISION *dm, PRECISION *em,
// Incident field.
PRECISION *Ext,
PRECISION *Extt,
PRECISION *Exz1,
PRECISION *Exz2,
const unsigned int x1,
const unsigned int Z1,
const unsigned int Z2,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
const unsigned int nf)
{
unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != 0)
{
Dx(i,nf) = Dx(i,n0) + (Hy(i-1,nf)-Hy(i,nf))*dt/dz;
Ex(i,nf) = ae[i]*(Dx(i,nf)-2*Dx(i,n0)+Dx(i,np)) + be[i]*(Dx(i,nf)-Dx(i,np)) + ce[i]*(2*Ex(i,n0)-Ex(i,np)) + de[i]*(2*Ex(i,n0)+Ex(i,np)) + ee[i]*(Ex(i,np));
}
__syncthreads();
// ABC
if (i == 0)
{
Ex(i,nf) = Ex(i+1,n0) + (Sc-1)/(Sc+1)*(Ex(i+1,nf)-Ex(i,n0));
Dx(i,nf) = e0*Ex(i,nf);
}
__syncthreads();
// Source.
if (i == SourceLocation)
{
if (SourceChoice == 1)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + exp(-1.*pow(((PRECISION)n-(PRECISION)td)/((PRECISION)PulseWidth/4.),2)) * Sc;
}
else if (SourceChoice == 2)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + sin(2.*PI*f*(PRECISION)n*dt) * Sc;
}
else if (SourceChoice == 3)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + (1.-2.*pow(PI*fp*((PRECISION)n*dt-dr),2))*exp(-1.*pow(PI*fp*((PRECISION)n*dt-dr),2)) * Sc;
}
Dx(SourceLocation,nf) = e0*Ex(SourceLocation,nf);
}
// Recording transmitted Fields.
if (i == x1)
Ext[n] = Ex(i,nf);
if (i == (Size-(2*Size/3))+10)
Extt[n] = Ex(i,nf);
if (i == Z1)
Exz1[n] = Ex(i,nf);
if (i == Z2)
Exz2[n] = Ex(i,nf);
}
| 0a965403f84e4cd1121360db7ae8248ae746038a.cu | // Macros for indexing data arrays.
#define Ex(i,n) Ex_[(i)+Size*(n)]
#define Dx(i,n) Dx_[(i)+Size*(n)]
#define Hy(i,n) Hy_[(i)+Size*(n)]
#define By(i,n) By_[(i)+Size*(n)]
#include <FDTD1DDNG.hpp>
// Dry run kernel.
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_DryRun_M(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
const PRECISION *Ex_, PRECISION *Hy_,
// Incident field.
PRECISION *Exi,
const unsigned int x1,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
unsigned int nf)
{
const unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != Size-1) // Normal update equation.
Hy(i,nf) = Hy(i,n0) + (Ex(i,n0)-Ex(i+1,n0))*dt/(u0*dz);
__syncthreads();
// ABC
if (i == Size-1)
Hy(i,nf) = Hy(i-1,n0) + (Sc-1)/(Sc+1)*(Hy(i-1,nf)-Hy(i,n0));
}
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_DryRun_E(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
PRECISION *Ex_, const PRECISION *Hy_,
// Incident field.
PRECISION *Exi,
const unsigned int x1,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
const unsigned int nf)
{
const unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != 0)
Ex(i,nf) = Ex(i,n0) + (Hy(i-1,nf)-Hy(i,nf))*dt/(e0*dz);
__syncthreads();
// ABC
if (i == 0)
Ex(i,nf) = Ex(i+1,n0) + (Sc-1)/(Sc+1)*(Ex(i+1,nf)-Ex(i,n0));
__syncthreads();
// Source.
if (i == SourceLocation)
{
if (SourceChoice == 1)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + exp(-1.*pow(((PRECISION)n-(PRECISION)td)/((PRECISION)PulseWidth/4.),2)) * Sc;
}
else if (SourceChoice == 2)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + sin(2.*PI*f*(PRECISION)n*dt) * Sc;
}
else if (SourceChoice == 3)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + (1.-2.*pow(PI*fp*((PRECISION)n*dt-dr),2))*exp(-1.*pow(PI*fp*((PRECISION)n*dt-dr),2)) * Sc;
}
}
// Recording incident Field.
if (i == x1)
Exi[n] = Ex(i,nf);
}
// Simulation kernel.
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_Simulation_M(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
PRECISION *Ex_, PRECISION *Dx_, PRECISION *Hy_, PRECISION *By_,
// Drude material parameters.
PRECISION *einf, PRECISION *uinf, PRECISION *wpesq, PRECISION *wpmsq, PRECISION *ge, PRECISION *gm,
// Drude scalars.
PRECISION *ae0, PRECISION *ae, PRECISION *be, PRECISION *ce, PRECISION *de, PRECISION *ee,
PRECISION *am0, PRECISION *am, PRECISION *bm, PRECISION *cm, PRECISION *dm, PRECISION *em,
// Incident field.
PRECISION *Ext,
PRECISION *Extt,
PRECISION *Exz1,
PRECISION *Exz2,
const unsigned int x1,
const unsigned int Z1,
const unsigned int Z2,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
const unsigned int nf)
{
unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != Size-1) // Normal update equation.
{
By(i,nf) = By(i,n0) + (Ex(i,n0)-Ex(i+1,n0))*dt/dz;
Hy(i,nf) = am[i]*(By(i,nf)-2*By(i,n0)+By(i,np)) + bm[i]*(By(i,nf)-By(i,np)) + cm[i]*(2*Hy(i,n0)-Hy(i,np)) + dm[i]*(2*Hy(i,n0)+Hy(i,np)) + em[i]*(Hy(i,np));
}
__syncthreads();
// ABC
if (i == Size-1)
{
Hy(i,nf) = Hy(i-1,n0) + (Sc-1)/(Sc+1)*(Hy(i-1,nf)-Hy(i,n0));
By(i,nf) = u0*Hy(i,nf);
}
}
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD1DDNGKernel_Simulation_E(
const unsigned int Size,
const unsigned int PulseWidth,
const unsigned int td,
const unsigned int SourceLocation,
const unsigned int SourceChoice,
const PRECISION e0,
const PRECISION u0,
const PRECISION dt,
const PRECISION dz,
const PRECISION Sc,
// Frequency, wavelength, wave number.
const PRECISION f,
const PRECISION fp,
const PRECISION dr,
// Data arrays.
PRECISION *Ex_, PRECISION *Dx_, PRECISION *Hy_, PRECISION *By_,
// Drude material parameters.
PRECISION *einf, PRECISION *uinf, PRECISION *wpesq, PRECISION *wpmsq, PRECISION *ge, PRECISION *gm,
// Drude scalars.
PRECISION *ae0, PRECISION *ae, PRECISION *be, PRECISION *ce, PRECISION *de, PRECISION *ee,
PRECISION *am0, PRECISION *am, PRECISION *bm, PRECISION *cm, PRECISION *dm, PRECISION *em,
// Incident field.
PRECISION *Ext,
PRECISION *Extt,
PRECISION *Exz1,
PRECISION *Exz2,
const unsigned int x1,
const unsigned int Z1,
const unsigned int Z2,
// Time indices.
const unsigned int n,
const unsigned int np,
const unsigned int n0,
const unsigned int nf)
{
unsigned int i = BlockX*blockIdx.x+threadIdx.x;
if (i != 0)
{
Dx(i,nf) = Dx(i,n0) + (Hy(i-1,nf)-Hy(i,nf))*dt/dz;
Ex(i,nf) = ae[i]*(Dx(i,nf)-2*Dx(i,n0)+Dx(i,np)) + be[i]*(Dx(i,nf)-Dx(i,np)) + ce[i]*(2*Ex(i,n0)-Ex(i,np)) + de[i]*(2*Ex(i,n0)+Ex(i,np)) + ee[i]*(Ex(i,np));
}
__syncthreads();
// ABC
if (i == 0)
{
Ex(i,nf) = Ex(i+1,n0) + (Sc-1)/(Sc+1)*(Ex(i+1,nf)-Ex(i,n0));
Dx(i,nf) = e0*Ex(i,nf);
}
__syncthreads();
// Source.
if (i == SourceLocation)
{
if (SourceChoice == 1)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + exp(-1.*pow(((PRECISION)n-(PRECISION)td)/((PRECISION)PulseWidth/4.),2)) * Sc;
}
else if (SourceChoice == 2)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + sin(2.*PI*f*(PRECISION)n*dt) * Sc;
}
else if (SourceChoice == 3)
{
Ex(SourceLocation,nf) = Ex(SourceLocation,nf) + (1.-2.*pow(PI*fp*((PRECISION)n*dt-dr),2))*exp(-1.*pow(PI*fp*((PRECISION)n*dt-dr),2)) * Sc;
}
Dx(SourceLocation,nf) = e0*Ex(SourceLocation,nf);
}
// Recording transmitted Fields.
if (i == x1)
Ext[n] = Ex(i,nf);
if (i == (Size-(2*Size/3))+10)
Extt[n] = Ex(i,nf);
if (i == Z1)
Exz1[n] = Ex(i,nf);
if (i == Z2)
Exz2[n] = Ex(i,nf);
}
|
02951fba742e581984b78e42732cdd8b2d190712.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <sstream>
#include <string>
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
class Formatter {
public:
Formatter() {}
~Formatter() {}
template <typename Type> Formatter &operator<<(const Type &value) {
stream_ << value;
return *this;
}
std::string str() const { return stream_.str(); }
operator std::string() const { return stream_.str(); }
enum ConvertToString { to_str };
std::string operator>>(ConvertToString) { return stream_.str(); }
private:
std::stringstream stream_;
Formatter(const Formatter &);
Formatter &operator=(Formatter &);
};
__global__ void approxmatchkernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
float multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ float buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
//for (int j=7;j>=-2;j--){
for (int j=7;j>-2;j--){
float level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
float x2=xyz2[i*m*3+l0*3+l*3+0];
float y2=xyz2[i*m*3+l0*3+l*3+1];
float z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
float w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
/*for (int k=threadIdx.x;k<n;k+=gridDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=1e-9f;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l];
suml+=w;
}
ratioL[k]=remainL[k]/suml;
}*/
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
float x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
float sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
float x1=buf[k*4+0];
float y1=buf[k*4+1];
float z1=buf[k*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
/*for (int l=threadIdx.x;l<m;l+=blockDim.x){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float sumr=0;
for (int k=0;k<n;k++){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k];
sumr+=w;
}
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}*/
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
float rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
/*for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=0;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l];
match[i*n*m+l*n+k]+=w;
suml+=w;
}
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}*/
__syncthreads();
}
}
}
__global__ void matchcostkernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
__shared__ float allsum[512];
const int Block=256;
__shared__ float buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float subsum=0;
for (int k0=0;k0<m;k0+=Block){
int endk=min(m,k0+Block);
for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){
buf[k]=xyz2[i*m*3+k0*3+k];
}
__syncthreads();
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
for (int k=0;k<endk-k0;k++){
//float x2=xyz2[(i*m+k)*3+0]-x1;
//float y2=xyz2[(i*m+k)*3+1]-y1;
//float z2=xyz2[(i*m+k)*3+2]-z1;
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=sqrtf(x2*x2+y2*y2+z2*z2);
subsum+=match[i*n*m+(k0+k)*n+j]*d;
}
}
__syncthreads();
}
allsum[threadIdx.x]=subsum;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
//void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
// matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
//}
__global__ void matchcostgrad2kernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
__shared__ float sum_grad[256*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
float x2=xyz2[(i*m+k)*3+0];
float y2=xyz2[(i*m+k)*3+1];
float z2=xyz2[(i*m+k)*3+2];
float subsumx=0,subsumy=0,subsumz=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=x2-xyz1[(i*n+j)*3+0];
float y1=y2-xyz1[(i*n+j)*3+1];
float z1=z2-xyz1[(i*n+j)*3+2];
float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
}
sum_grad[threadIdx.x*3+0]=subsumx;
sum_grad[threadIdx.x*3+1]=subsumy;
sum_grad[threadIdx.x*3+2]=subsumz;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*3+0]+=sum_grad[j2*3+0];
sum_grad[j1*3+1]+=sum_grad[j2*3+1];
sum_grad[j1*3+2]+=sum_grad[j2*3+2];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*3+0]=sum_grad[0];
grad2[(i*m+k)*3+1]=sum_grad[1];
grad2[(i*m+k)*3+2]=sum_grad[2];
}
__syncthreads();
}
}
}
__global__ void matchcostgrad1kernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
float x1=xyz1[i*n*3+l*3+0];
float y1=xyz1[i*n*3+l*3+1];
float z1=xyz1[i*n*3+l*3+2];
float dx=0,dy=0,dz=0;
for (int k=0;k<m;k++){
float x2=xyz2[i*m*3+k*3+0];
float y2=xyz2[i*m*3+k*3+1];
float z2=xyz2[i*m*3+k*3+2];
float d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
}
grad1[i*n*3+l*3+0]=dx;
grad1[i*n*3+l*3+1]=dy;
grad1[i*n*3+l*3+2]=dz;
}
}
}
//void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){
// matchcostgrad<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
//}
/*void AddGPUKernel(Dtype *in_a, Dtype *in_b, Dtype *out_c, int N,
hipStream_t stream)*/
// temp: TensorShape{b,(n+m)*2}
void approxmatch(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp, hipStream_t stream){
hipLaunchKernelGGL(( approxmatchkernel)
, dim3(32), dim3(512), 0, stream, b,n,m,xyz1,xyz2,match,temp);
hipError_t err = hipGetLastError();
if (hipSuccess != err)
throw std::runtime_error(Formatter()
<< "CUDA kernel failed : " << std::to_string(err));
}
void matchcost(int b,int n,int m,const float * xyz1,const float * xyz2,float * match, float * out, hipStream_t stream){
hipLaunchKernelGGL(( matchcostkernel), dim3(32),dim3(512),0,stream, b,n,m,xyz1,xyz2,match,out);
hipError_t err = hipGetLastError();
if (hipSuccess != err)
throw std::runtime_error(Formatter()
<< "CUDA kernel failed : " << std::to_string(err));
}
void matchcostgrad(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2, hipStream_t stream){
hipLaunchKernelGGL(( matchcostgrad1kernel), dim3(32),dim3(512),0,stream, b,n,m,xyz1,xyz2,match,grad1);
hipLaunchKernelGGL(( matchcostgrad2kernel), dim3(dim3(32,32)),dim3(256),0,stream, b,n,m,xyz1,xyz2,match,grad2);
hipError_t err = hipGetLastError();
if (hipSuccess != err)
throw std::runtime_error(Formatter()
<< "CUDA kernel failed : " << std::to_string(err));
}
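// Rough usage sketch (added; `cost`, `stream` and the allocation below are
// illustrative, not from this file). The launchers expect device buffers
// xyz1[b*n*3], xyz2[b*m*3], match[b*n*m] and the scratch buffer described
// above, i.e. b*(n+m)*2 floats:
// float *temp;
// hipMalloc(&temp, (size_t)b * (n + m) * 2 * sizeof(float));
// approxmatch(b, n, m, xyz1, xyz2, match, temp, stream);
// matchcost(b, n, m, xyz1, xyz2, match, cost, stream); // cost holds b floats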
| 02951fba742e581984b78e42732cdd8b2d190712.cu | #include <iostream>
#include <sstream>
#include <string>
#include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
class Formatter {
public:
Formatter() {}
~Formatter() {}
template <typename Type> Formatter &operator<<(const Type &value) {
stream_ << value;
return *this;
}
std::string str() const { return stream_.str(); }
operator std::string() const { return stream_.str(); }
enum ConvertToString { to_str };
std::string operator>>(ConvertToString) { return stream_.str(); }
private:
std::stringstream stream_;
Formatter(const Formatter &);
Formatter &operator=(Formatter &);
};
__global__ void approxmatchkernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
float multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ float buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
//for (int j=7;j>=-2;j--){
for (int j=7;j>-2;j--){
float level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
float x2=xyz2[i*m*3+l0*3+l*3+0];
float y2=xyz2[i*m*3+l0*3+l*3+1];
float z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
float w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
/*for (int k=threadIdx.x;k<n;k+=gridDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=1e-9f;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l];
suml+=w;
}
ratioL[k]=remainL[k]/suml;
}*/
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
float x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
float sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
float x1=buf[k*4+0];
float y1=buf[k*4+1];
float z1=buf[k*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
/*for (int l=threadIdx.x;l<m;l+=blockDim.x){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float sumr=0;
for (int k=0;k<n;k++){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k];
sumr+=w;
}
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}*/
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
float rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
/*for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=0;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l];
match[i*n*m+l*n+k]+=w;
suml+=w;
}
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}*/
__syncthreads();
}
}
}
__global__ void matchcostkernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
__shared__ float allsum[512];
const int Block=256;
__shared__ float buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float subsum=0;
for (int k0=0;k0<m;k0+=Block){
int endk=min(m,k0+Block);
for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){
buf[k]=xyz2[i*m*3+k0*3+k];
}
__syncthreads();
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
for (int k=0;k<endk-k0;k++){
//float x2=xyz2[(i*m+k)*3+0]-x1;
//float y2=xyz2[(i*m+k)*3+1]-y1;
//float z2=xyz2[(i*m+k)*3+2]-z1;
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=sqrtf(x2*x2+y2*y2+z2*z2);
subsum+=match[i*n*m+(k0+k)*n+j]*d;
}
}
__syncthreads();
}
allsum[threadIdx.x]=subsum;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
//void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
// matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
//}
__global__ void matchcostgrad2kernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
__shared__ float sum_grad[256*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
float x2=xyz2[(i*m+k)*3+0];
float y2=xyz2[(i*m+k)*3+1];
float z2=xyz2[(i*m+k)*3+2];
float subsumx=0,subsumy=0,subsumz=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=x2-xyz1[(i*n+j)*3+0];
float y1=y2-xyz1[(i*n+j)*3+1];
float z1=z2-xyz1[(i*n+j)*3+2];
float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
}
sum_grad[threadIdx.x*3+0]=subsumx;
sum_grad[threadIdx.x*3+1]=subsumy;
sum_grad[threadIdx.x*3+2]=subsumz;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*3+0]+=sum_grad[j2*3+0];
sum_grad[j1*3+1]+=sum_grad[j2*3+1];
sum_grad[j1*3+2]+=sum_grad[j2*3+2];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*3+0]=sum_grad[0];
grad2[(i*m+k)*3+1]=sum_grad[1];
grad2[(i*m+k)*3+2]=sum_grad[2];
}
__syncthreads();
}
}
}
__global__ void matchcostgrad1kernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
float x1=xyz1[i*n*3+l*3+0];
float y1=xyz1[i*n*3+l*3+1];
float z1=xyz1[i*n*3+l*3+2];
float dx=0,dy=0,dz=0;
for (int k=0;k<m;k++){
float x2=xyz2[i*m*3+k*3+0];
float y2=xyz2[i*m*3+k*3+1];
float z2=xyz2[i*m*3+k*3+2];
float d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
}
grad1[i*n*3+l*3+0]=dx;
grad1[i*n*3+l*3+1]=dy;
grad1[i*n*3+l*3+2]=dz;
}
}
}
//void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){
// matchcostgrad<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
//}
/*void AddGPUKernel(Dtype *in_a, Dtype *in_b, Dtype *out_c, int N,
cudaStream_t stream)*/
// temp: TensorShape{b,(n+m)*2}
void approxmatch(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp, cudaStream_t stream){
approxmatchkernel
<<<32, 512, 0, stream>>>(b,n,m,xyz1,xyz2,match,temp);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
throw std::runtime_error(Formatter()
<< "CUDA kernel failed : " << std::to_string(err));
}
void matchcost(int b,int n,int m,const float * xyz1,const float * xyz2,float * match, float * out, cudaStream_t stream){
matchcostkernel<<<32,512,0,stream>>>(b,n,m,xyz1,xyz2,match,out);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
throw std::runtime_error(Formatter()
<< "CUDA kernel failed : " << std::to_string(err));
}
void matchcostgrad(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2, cudaStream_t stream){
matchcostgrad1kernel<<<32,512,0,stream>>>(b,n,m,xyz1,xyz2,match,grad1);
matchcostgrad2kernel<<<dim3(32,32),256,0,stream>>>(b,n,m,xyz1,xyz2,match,grad2);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
throw std::runtime_error(Formatter()
<< "CUDA kernel failed : " << std::to_string(err));
}
|
b6a4893bd52c9d9e267239763617a3ad86e09268.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlag2c.cu, mixed zc -> ds, Wed Jan 2 14:18:50 2019
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlat2s and zlaset.
*/
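// Added example (not from the original comment): for m = 1000, n = 100 the
// host code below launches grid( ceil(1000/64), ceil(100/32) ) = (16, 4)
// blocks of BLK_X = 64 threads, so each thread owns one row of a 64 x 32 tile.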
__global__
void dlag2s_kernel(
int m, int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAG2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_dlag2s(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipMemcpyToSymbol( HIP_SYMBOL(flag), info, sizeof(flag) ); // flag = 0
hipLaunchKernelGGL(( dlag2s_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, A, lda, SA, ldsa, rmax );
hipMemcpyFromSymbol( info, HIP_SYMBOL(flag), sizeof(flag) ); // info = flag
}
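// Minimal usage sketch (added; dA, dSA, ldda, lddsa and queue are assumed to
// come from the caller, they are not defined in this file):
// magma_int_t info = 0;
// magmablas_dlag2s( m, n, dA, ldda, dSA, lddsa, queue, &info );
// if ( info != 0 ) { /* some entry of dA exceeded the single-precision range */ }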
| b6a4893bd52c9d9e267239763617a3ad86e09268.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlag2c.cu, mixed zc -> ds, Wed Jan 2 14:18:50 2019
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlat2s and zlaset.
*/
__global__
void dlag2s_kernel(
int m, int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAG2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_dlag2s(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
dlag2s_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, A, lda, SA, ldsa, rmax );
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
|
0ada095fa1619cff55363952a9a28332b7b75c16.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <bulk/bulk.hpp>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <cassert>
#include <iostream>
#include "time_invocation_cuda.hpp"
#include "decomposition.hpp"
struct reduce_partitions
{
template<typename ConcurrentGroup, typename Iterator1, typename Iterator2, typename T, typename BinaryOperation>
__device__
void operator()(ConcurrentGroup &this_group, Iterator1 first, Iterator1 last, Iterator2 result, T init, BinaryOperation binary_op)
{
T sum = bulk::reduce(this_group, first, last, init, binary_op);
if(this_group.this_exec.index() == 0)
{
*result = sum;
}
}
template<typename ConcurrentGroup, typename Iterator1, typename Iterator2, typename BinaryOperation>
__device__
void operator()(ConcurrentGroup &this_group, Iterator1 first, Iterator1 last, Iterator2 result, BinaryOperation binary_op)
{
// noticeably faster to pass the last element as the init
typename thrust::iterator_value<Iterator2>::type init = last[-1];
(*this)(this_group, first, last - 1, result, init, binary_op);
}
template<typename ConcurrentGroup, typename Iterator1, typename Decomposition, typename Iterator2, typename T, typename BinaryFunction>
__device__
void operator()(ConcurrentGroup &this_group, Iterator1 first, Decomposition decomp, Iterator2 result, T init, BinaryFunction binary_op)
{
typename Decomposition::range range = decomp[this_group.index()];
Iterator1 last = first + range.second;
first += range.first;
if(this_group.index() != 0)
{
// noticeably faster to pass the last element as the init
init = last[-1];
--last;
} // end if
(*this)(this_group, first, last, result + this_group.index(), init, binary_op);
}
};
template<typename RandomAccessIterator,
typename T,
typename BinaryOperation>
T my_reduce(RandomAccessIterator first, RandomAccessIterator last, T init, BinaryOperation binary_op)
{
typedef typename thrust::iterator_difference<RandomAccessIterator>::type size_type;
const size_type n = last - first;
if(n <= 0) return init;
const size_type groupsize = 128;
const size_type grainsize = 7;
const size_type tile_size = groupsize * grainsize;
const size_type num_tiles = (n + tile_size - 1) / tile_size;
const size_type subscription = 10;
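// (added note) each group of 128 agents covers groupsize * grainsize =
// 128 * 7 = 896 elements per tile; the number of groups is capped at
// subscription * hardware_concurrency so very large inputs reuse groups
// instead of launching one group per tile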
bulk::concurrent_group<
bulk::agent<grainsize>,
groupsize
> g;
const size_type num_groups = thrust::min<size_type>(subscription * g.hardware_concurrency(), num_tiles);
aligned_decomposition<size_type> decomp(n, num_groups, tile_size);
thrust::hip::tag t;
thrust::detail::temporary_array<T,thrust::hip::tag> partial_sums(t, decomp.size());
// reduce into partial sums
bulk::async(bulk::par(g, decomp.size()), reduce_partitions(), bulk::root.this_exec, first, decomp, partial_sums.begin(), init, binary_op);
if(partial_sums.size() > 1)
{
// reduce the partial sums
bulk::async(g, reduce_partitions(), bulk::root, partial_sums.begin(), partial_sums.end(), partial_sums.begin(), binary_op);
} // end while
return partial_sums[0];
} // end my_reduce()
template<typename T>
T my_reduce(const thrust::device_vector<T> *vec)
{
return my_reduce(vec->begin(), vec->end(), T(0), thrust::plus<T>());
}
template<typename T>
T thrust_reduce(const thrust::device_vector<T> *vec)
{
return thrust::reduce(vec->begin(), vec->end(), T(0), thrust::plus<T>());
}
template<typename T>
void compare()
{
thrust::device_vector<T> vec(1 << 28);
thrust_reduce(&vec);
double thrust_msecs = time_invocation_cuda(50, thrust_reduce<T>, &vec);
my_reduce(&vec);
double my_msecs = time_invocation_cuda(50, my_reduce<T>, &vec);
std::cout << "Thrust's time: " << thrust_msecs << " ms" << std::endl;
std::cout << "My time: " << my_msecs << " ms" << std::endl;
std::cout << "Performance relative to Thrust: " << thrust_msecs / my_msecs << std::endl;
}
int main()
{
size_t n = 123456789;
thrust::device_vector<int> vec(n);
thrust::sequence(vec.begin(), vec.end());
int my_result = my_reduce(vec.begin(), vec.end(), 13, thrust::plus<int>());
std::cout << "my_result: " << my_result << std::endl;
int thrust_result = thrust::reduce(vec.begin(), vec.end(), 13, thrust::plus<int>());
std::cout << "thrust_result: " << thrust_result << std::endl;
assert(thrust_result == my_result);
std::cout << "int: " << std::endl;
compare<int>();
std::cout << "long int: " << std::endl;
compare<long int>();
std::cout << "float: " << std::endl;
compare<float>();
std::cout << "double: " << std::endl;
compare<double>();
}
| 0ada095fa1619cff55363952a9a28332b7b75c16.cu | #include <cstdio>
#include <bulk/bulk.hpp>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <cassert>
#include <iostream>
#include "time_invocation_cuda.hpp"
#include "decomposition.hpp"
struct reduce_partitions
{
template<typename ConcurrentGroup, typename Iterator1, typename Iterator2, typename T, typename BinaryOperation>
__device__
void operator()(ConcurrentGroup &this_group, Iterator1 first, Iterator1 last, Iterator2 result, T init, BinaryOperation binary_op)
{
T sum = bulk::reduce(this_group, first, last, init, binary_op);
if(this_group.this_exec.index() == 0)
{
*result = sum;
}
}
template<typename ConcurrentGroup, typename Iterator1, typename Iterator2, typename BinaryOperation>
__device__
void operator()(ConcurrentGroup &this_group, Iterator1 first, Iterator1 last, Iterator2 result, BinaryOperation binary_op)
{
// noticeably faster to pass the last element as the init
typename thrust::iterator_value<Iterator2>::type init = last[-1];
(*this)(this_group, first, last - 1, result, init, binary_op);
}
template<typename ConcurrentGroup, typename Iterator1, typename Decomposition, typename Iterator2, typename T, typename BinaryFunction>
__device__
void operator()(ConcurrentGroup &this_group, Iterator1 first, Decomposition decomp, Iterator2 result, T init, BinaryFunction binary_op)
{
typename Decomposition::range range = decomp[this_group.index()];
Iterator1 last = first + range.second;
first += range.first;
if(this_group.index() != 0)
{
// noticeably faster to pass the last element as the init
init = last[-1];
--last;
} // end if
(*this)(this_group, first, last, result + this_group.index(), init, binary_op);
}
};
template<typename RandomAccessIterator,
typename T,
typename BinaryOperation>
T my_reduce(RandomAccessIterator first, RandomAccessIterator last, T init, BinaryOperation binary_op)
{
typedef typename thrust::iterator_difference<RandomAccessIterator>::type size_type;
const size_type n = last - first;
if(n <= 0) return init;
const size_type groupsize = 128;
const size_type grainsize = 7;
const size_type tile_size = groupsize * grainsize;
const size_type num_tiles = (n + tile_size - 1) / tile_size;
const size_type subscription = 10;
bulk::concurrent_group<
bulk::agent<grainsize>,
groupsize
> g;
const size_type num_groups = thrust::min<size_type>(subscription * g.hardware_concurrency(), num_tiles);
aligned_decomposition<size_type> decomp(n, num_groups, tile_size);
thrust::cuda::tag t;
thrust::detail::temporary_array<T,thrust::cuda::tag> partial_sums(t, decomp.size());
// reduce into partial sums
bulk::async(bulk::par(g, decomp.size()), reduce_partitions(), bulk::root.this_exec, first, decomp, partial_sums.begin(), init, binary_op);
if(partial_sums.size() > 1)
{
// reduce the partial sums
bulk::async(g, reduce_partitions(), bulk::root, partial_sums.begin(), partial_sums.end(), partial_sums.begin(), binary_op);
} // end while
return partial_sums[0];
} // end my_reduce()
template<typename T>
T my_reduce(const thrust::device_vector<T> *vec)
{
return my_reduce(vec->begin(), vec->end(), T(0), thrust::plus<T>());
}
template<typename T>
T thrust_reduce(const thrust::device_vector<T> *vec)
{
return thrust::reduce(vec->begin(), vec->end(), T(0), thrust::plus<T>());
}
template<typename T>
void compare()
{
thrust::device_vector<T> vec(1 << 28);
thrust_reduce(&vec);
double thrust_msecs = time_invocation_cuda(50, thrust_reduce<T>, &vec);
my_reduce(&vec);
double my_msecs = time_invocation_cuda(50, my_reduce<T>, &vec);
std::cout << "Thrust's time: " << thrust_msecs << " ms" << std::endl;
std::cout << "My time: " << my_msecs << " ms" << std::endl;
std::cout << "Performance relative to Thrust: " << thrust_msecs / my_msecs << std::endl;
}
int main()
{
size_t n = 123456789;
thrust::device_vector<int> vec(n);
thrust::sequence(vec.begin(), vec.end());
int my_result = my_reduce(vec.begin(), vec.end(), 13, thrust::plus<int>());
std::cout << "my_result: " << my_result << std::endl;
int thrust_result = thrust::reduce(vec.begin(), vec.end(), 13, thrust::plus<int>());
std::cout << "thrust_result: " << thrust_result << std::endl;
assert(thrust_result == my_result);
std::cout << "int: " << std::endl;
compare<int>();
std::cout << "long int: " << std::endl;
compare<long int>();
std::cout << "float: " << std::endl;
compare<float>();
std::cout << "double: " << std::endl;
compare<double>();
}
|
f906cca518da6c44a68939e0c8bd7ae77dabdfa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <opencv2/opencv.hpp>
// kernel
__global__ void __apply(const uint8_t *pic, uint32_t *histogram) {
int32_t i = blockIdx.x;
int32_t j = threadIdx.x;
int32_t n_col = blockDim.x;
int32_t n_row = gridDim.x;
uint32_t pixel_current = i * n_col + j;
__syncthreads();
atomicAdd(histogram+pic[pixel_current],1);
//histogram[pic[pixel_current]]+=1;
__syncthreads();
}
__host__ void histogram_grayscale(const uint8_t *h_pic, uint32_t *h_histogram, size_t n_row, size_t n_col) {
// transfer the picture and the output buffer
size_t size = sizeof(uint8_t) * n_row * n_col;
uint8_t *d_pic;
hipMalloc(&d_pic, size);
hipMemcpy(d_pic, h_pic, size, hipMemcpyHostToDevice);
size_t size_hist = sizeof(uint32_t)*256;
uint32_t* d_histogram;
hipMalloc(&d_histogram, size_hist);
hipMemset(d_histogram, 0, size_hist);
//__apply<<<dim3(n_row, 1, 1), dim3(n_col, 1, 1)>>>(d_pic, d_pic_filter, d_filter, sum_filter, n_row_filter, n_col_filter);
hipLaunchKernelGGL(( __apply), dim3(dim3(n_row, 1, 1)), dim3(dim3(n_col, 1, 1)), 0, 0, d_pic, d_histogram);
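// (added note) the launch above uses one block per image row and one thread
// per column, so blockDim.x is capped by the 1024-thread-per-block limit,
// which is what the warning below is about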
if (n_col>1024)
printf("Big picture. Remake separate threads. Not build histogram");
hipMemcpy(h_histogram, d_histogram, size_hist, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(d_pic);
hipFree(d_histogram);
} | f906cca518da6c44a68939e0c8bd7ae77dabdfa4.cu | #include <iostream>
#include <opencv2/opencv.hpp>
// kernel
__global__ void __apply(const uint8_t *pic, uint32_t *histogram) {
int32_t i = blockIdx.x;
int32_t j = threadIdx.x;
int32_t n_col = blockDim.x;
int32_t n_row = gridDim.x;
uint32_t pixel_current = i * n_col + j;
__syncthreads();
atomicAdd(histogram+pic[pixel_current],1);
//histogram[pic[pixel_current]]+=1;
__syncthreads();
}
__host__ void histogram_grayscale(const uint8_t *h_pic, uint32_t *h_histogram, size_t n_row, size_t n_col) {
// transfer the picture and the output buffer
size_t size = sizeof(uint8_t) * n_row * n_col;
uint8_t *d_pic;
cudaMalloc(&d_pic, size);
cudaMemcpy(d_pic, h_pic, size, cudaMemcpyHostToDevice);
size_t size_hist = sizeof(uint32_t)*256;
uint32_t* d_histogram;
cudaMalloc(&d_histogram, size_hist);
cudaMemset(d_histogram, 0, size_hist);
//__apply<<<dim3(n_row, 1, 1), dim3(n_col, 1, 1)>>>(d_pic, d_pic_filter, d_filter, sum_filter, n_row_filter, n_col_filter);
__apply<<<dim3(n_row, 1, 1), dim3(n_col, 1, 1)>>>(d_pic, d_histogram);
if (n_col>1024)
printf("Big picture. Remake separate threads. Not build histogram");
cudaMemcpy(h_histogram, d_histogram, size_hist, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(d_pic);
cudaFree(d_histogram);
} |
137d1ca1eaf0bd8e55d7376bc0fbc3b98b81f7aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void RoundKernel(float* input, float* output, int size)
{
int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
if(id < size)
{
output[id] = round(input[id]);
}
} | 137d1ca1eaf0bd8e55d7376bc0fbc3b98b81f7aa.cu | #include "includes.h"
__global__ void RoundKernel(float* input, float* output, int size)
{
int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
if(id < size)
{
output[id] = round(input[id]);
}
} |
3e5b62f0fa86e1842f52958f62ea3953d8008ff7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else // Fermi
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace improvedstaggered {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
#undef GPU_NDEG_TWISTED_MASS_DIRAC
#undef GPU_CLOVER_DIRAC
#undef GPU_DOMAIN_WALL_DIRAC
#define DD_IMPROVED 1
#include <staggered_dslash_def.h> // staggered Dslash kernels
#undef DD_IMPROVED
#include <dslash_quda.cuh>
} // end namespace improvedstaggered
// declare the dslash events
#include <dslash_events.cuh>
using namespace improvedstaggered;
template<typename T> struct RealType {};
template<> struct RealType<double2> { typedef double type; };
template<> struct RealType<float2> { typedef float type; };
template<> struct RealType<float4> { typedef float type; };
template<> struct RealType<short2> { typedef short type; };
template<> struct RealType<short4> { typedef short type; };
#ifdef GPU_STAGGERED_DIRAC
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {
private:
const fatGFloat *fat0, *fat1;
const longGFloat *long0, *long1;
const phaseFloat *phase0, *phase1;
const double a;
const int nSrc;
protected:
unsigned int sharedBytesPerThread() const
{
#ifdef PARALLEL_DIR
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return 6 * reg_size;
#else
return 0;
#endif
}
public:
StaggeredDslashCuda(cudaColorSpinorField *out, const fatGFloat *fat0, const fatGFloat *fat1,
const longGFloat *long0, const longGFloat *long1,
const phaseFloat *phase0, const phaseFloat *phase1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: DslashCuda(out, in, x, reconstruct, dagger), fat0(fat0), fat1(fat1), long0(long0),
long1(long1), phase0(phase0), phase1(phase1), a(a), nSrc(in->X(4))
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~StaggeredDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslashParam.swizzle = tp.aux.x;
IMPROVED_STAGGERED_DSLASH(tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(),
fat0, fat1, long0, long1, phase0, phase1,
(sFloat*)in->V(), (float*)in->Norm(),
(sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = deviceProp.sharedMemPerBlock;
// first try to advance block.y (number of right-hand sides per block)
if (param.block.y < nSrc && param.block.y < deviceProp.maxThreadsDim[1] &&
sharedBytesPerThread()*param.block.x*param.block.y < max_shared &&
(param.block.x*(param.block.y+1)) <= deviceProp.maxThreadsPerBlock) {
param.block.y++;
param.grid.y = (nSrc + param.block.y - 1) / param.block.y;
return true;
} else {
bool rtn = DslashCuda::advanceBlockDim(param);
param.block.y = 1;
param.grid.y = nSrc;
return rtn;
}
}
bool advanceAux(TuneParam ¶m) const
{
#ifdef SWIZZLE
if (param.aux.x < 2*deviceProp.multiProcessorCount) {
param.aux.x++;
return true;
} else {
param.aux.x = 1;
return false;
}
#else
return false;
#endif
}
void initTuneParam(TuneParam ¶m) const
{
DslashCuda::initTuneParam(param);
param.block.y = 1;
param.grid.y = nSrc;
param.aux.x = 1;
}
void defaultTuneParam(TuneParam ¶m) const { initTuneParam(param); }
int Nface() { return 6; }
/*
per direction / dimension flops
SU(3) matrix-vector flops = (8 Nc - 2) * Nc
xpay = 2 * 2 * Nc * Ns
So for the full dslash we have
flops = (2 * 2 * Nd * (8*Nc-2) * Nc) + ((2 * 2 * Nd - 1) * 2 * Nc * Ns)
flops_xpay = flops + 2 * 2 * Nc * Ns
For Asqtad this should give 1146 for Nc=3,Ns=1 and 1158 for the axpy equivalent
*/
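// Worked check (added note), with Nc = 3, Ns = 1, Nd = 4:
// mv_flops = (8*3-2)*3 = 66, so 2*2*4*66 = 1056 for the matrix-vector part,
// plus (2*2*4-1)*2*3*1 = 90 for the accumulation, i.e. 1146 flops per site;
// the xpay term adds 2*2*3*1 = 12 for a total of 1158.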
virtual long long flops() const {
int mv_flops = (8 * in->Ncolor() - 2) * in->Ncolor(); // SU(3) matrix-vector flops
int ghost_flops = (3 + 1) * (mv_flops + 2*in->Ncolor()*in->Nspin());
int xpay_flops = 2 * 2 * in->Ncolor() * in->Nspin(); // multiply and add per real component
int num_dir = 2 * 4; // dir * dim
long long flops = 0;
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
flops = ghost_flops * 2 * in->GhostFace()[dslashParam.kernel_type];
break;
case EXTERIOR_KERNEL_ALL:
{
long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
flops = ghost_flops * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY:
{
long long sites = in->VolumeCB();
flops = (2*num_dir*mv_flops + // SU(3) matrix-vector multiplies
(2*num_dir-1)*2*in->Ncolor()*in->Nspin()) * sites; // accumulation
if (x) flops += xpay_flops * sites; // axpy is always on interior
if (dslashParam.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2 * in->GhostFace()[d];
flops -= ghost_flops * ghost_sites;
break;
}
}
return flops;
}
virtual long long bytes() const {
int gauge_bytes_fat = QUDA_RECONSTRUCT_NO * in->Precision();
int gauge_bytes_long = reconstruct * in->Precision();
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0);
int ghost_bytes = 3 * (spinor_bytes + gauge_bytes_long) + (spinor_bytes + gauge_bytes_fat) + spinor_bytes;
int num_dir = 2 * 4; // set to 4 dimensions since we take care of 5-d fermions in derived classes where necessary
long long bytes = 0;
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
bytes = ghost_bytes * 2 * in->GhostFace()[dslashParam.kernel_type];
break;
case EXTERIOR_KERNEL_ALL:
{
long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
bytes = ghost_bytes * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY:
{
long long sites = in->VolumeCB();
bytes = (num_dir*(gauge_bytes_fat + gauge_bytes_long) + // gauge reads
num_dir*2*spinor_bytes + // spinor reads
spinor_bytes)*sites; // spinor write
if (x) bytes += spinor_bytes;
if (dslashParam.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2*in->GhostFace()[d];
bytes -= ghost_bytes * ghost_sites;
break;
}
}
return bytes;
}
};
#endif // GPU_STAGGERED_DIRAC
#include <dslash_policy.cuh>
void improvedStaggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->allocateGhostBuffer(3);
#ifdef GPU_STAGGERED_DIRAC
dslashParam.Ls = out->X(4); // use Ls as the number of sources
#ifdef MULTI_GPU
for(int i=0;i < 4; i++){
if(commDimPartitioned(i) && (fatGauge.X()[i] < 6)){
errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in staggered dslash\n");
}
}
#endif
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.parity = parity;
dslashParam.gauge_stride = fatGauge.Stride();
dslashParam.long_gauge_stride = longGauge.Stride();
dslashParam.fat_link_max = fatGauge.LinkMax();
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *fatGauge0, *fatGauge1;
void* longGauge0, *longGauge1;
bindFatGaugeTex(fatGauge, parity, &fatGauge0, &fatGauge1);
bindLongGaugeTex(longGauge, parity, &longGauge0, &longGauge1);
void *longPhase0 = (char*)longGauge0 + longGauge.PhaseOffset();
void *longPhase1 = (char*)longGauge1 + longGauge.PhaseOffset();
if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
errorQuda("Mixing gauge and spinor precision not supported"
"(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d",
in->Precision(), fatGauge.Precision(), longGauge.Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new StaggeredDslashCuda<double2, double2, double2, double>
(out, (double2*)fatGauge0, (double2*)fatGauge1,
(double2*)longGauge0, (double2*)longGauge1,
(double*)longPhase0, (double*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>
(out, (float2*)fatGauge0, (float2*)fatGauge1,
(float4*)longGauge0, (float4*)longGauge1,
(float*)longPhase0, (float*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>
(out, (short2*)fatGauge0, (short2*)fatGauge1,
(short4*)longGauge0, (short4*)longGauge1,
(short*)longPhase0, (short*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
#ifndef GPU_COMMS
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile);
dslash_policy.apply(0);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile);
delete dslashImp;
#endif
delete dslash;
unbindFatGaugeTex(fatGauge);
unbindLongGaugeTex(longGauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
}
| 3e5b62f0fa86e1842f52958f62ea3953d8008ff7.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
//these are access control for staggered action
#ifdef GPU_STAGGERED_DIRAC
#if (__COMPUTE_CAPABILITY__ >= 300) // Kepler works best with texture loads only
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#else // Fermi
//#define DIRECT_ACCESS_FAT_LINK
//#define DIRECT_ACCESS_LONG_LINK
//#define DIRECT_ACCESS_SPINOR
//#define DIRECT_ACCESS_ACCUM
//#define DIRECT_ACCESS_INTER
//#define DIRECT_ACCESS_PACK
#endif
#endif // GPU_STAGGERED_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace improvedstaggered {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
#undef GPU_NDEG_TWISTED_MASS_DIRAC
#undef GPU_CLOVER_DIRAC
#undef GPU_DOMAIN_WALL_DIRAC
#define DD_IMPROVED 1
#include <staggered_dslash_def.h> // staggered Dslash kernels
#undef DD_IMPROVED
#include <dslash_quda.cuh>
} // end namespace improvedstaggered
// declare the dslash events
#include <dslash_events.cuh>
using namespace improvedstaggered;
template<typename T> struct RealType {};
template<> struct RealType<double2> { typedef double type; };
template<> struct RealType<float2> { typedef float type; };
template<> struct RealType<float4> { typedef float type; };
template<> struct RealType<short2> { typedef short type; };
template<> struct RealType<short4> { typedef short type; };
#ifdef GPU_STAGGERED_DIRAC
template <typename sFloat, typename fatGFloat, typename longGFloat, typename phaseFloat>
class StaggeredDslashCuda : public DslashCuda {
private:
const fatGFloat *fat0, *fat1;
const longGFloat *long0, *long1;
const phaseFloat *phase0, *phase1;
const double a;
const int nSrc;
protected:
unsigned int sharedBytesPerThread() const
{
#ifdef PARALLEL_DIR
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return 6 * reg_size;
#else
return 0;
#endif
}
public:
StaggeredDslashCuda(cudaColorSpinorField *out, const fatGFloat *fat0, const fatGFloat *fat1,
const longGFloat *long0, const longGFloat *long1,
const phaseFloat *phase0, const phaseFloat *phase1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: DslashCuda(out, in, x, reconstruct, dagger), fat0(fat0), fat1(fat1), long0(long0),
long1(long1), phase0(phase0), phase1(phase1), a(a), nSrc(in->X(4))
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~StaggeredDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslashParam.swizzle = tp.aux.x;
IMPROVED_STAGGERED_DSLASH(tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(),
fat0, fat1, long0, long1, phase0, phase1,
(sFloat*)in->V(), (float*)in->Norm(),
(sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = deviceProp.sharedMemPerBlock;
// first try to advance block.y (number of right-hand sides per block)
if (param.block.y < nSrc && param.block.y < deviceProp.maxThreadsDim[1] &&
sharedBytesPerThread()*param.block.x*param.block.y < max_shared &&
(param.block.x*(param.block.y+1)) <= deviceProp.maxThreadsPerBlock) {
param.block.y++;
param.grid.y = (nSrc + param.block.y - 1) / param.block.y;
return true;
} else {
bool rtn = DslashCuda::advanceBlockDim(param);
param.block.y = 1;
param.grid.y = nSrc;
return rtn;
}
}
bool advanceAux(TuneParam ¶m) const
{
#ifdef SWIZZLE
if (param.aux.x < 2*deviceProp.multiProcessorCount) {
param.aux.x++;
return true;
} else {
param.aux.x = 1;
return false;
}
#else
return false;
#endif
}
void initTuneParam(TuneParam ¶m) const
{
DslashCuda::initTuneParam(param);
param.block.y = 1;
param.grid.y = nSrc;
param.aux.x = 1;
}
void defaultTuneParam(TuneParam ¶m) const { initTuneParam(param); }
int Nface() { return 6; }
/*
per direction / dimension flops
SU(3) matrix-vector flops = (8 Nc - 2) * Nc
xpay = 2 * 2 * Nc * Ns
So for the full dslash we have
flops = (2 * 2 * Nd * (8*Nc-2) * Nc) + ((2 * 2 * Nd - 1) * 2 * Nc * Ns)
flops_xpay = flops + 2 * 2 * Nc * Ns
For Asqtad this should give 1146 for Nc=3,Ns=2 and 1158 for the axpy equivalent
*/
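// Worked example (illustrative): with Nd=4, Nc=3 and Nspin()==1 (staggered), the
// matrix-vector term is 2*2*4*(8*3-2)*3 = 1056 and the accumulation term is
// (2*2*4-1)*2*3*1 = 90, giving 1146; xpay adds a further 2*2*3*1 = 12 for 1158.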
virtual long long flops() const {
int mv_flops = (8 * in->Ncolor() - 2) * in->Ncolor(); // SU(3) matrix-vector flops
int ghost_flops = (3 + 1) * (mv_flops + 2*in->Ncolor()*in->Nspin());
int xpay_flops = 2 * 2 * in->Ncolor() * in->Nspin(); // multiply and add per real component
int num_dir = 2 * 4; // dir * dim
long long flops = 0;
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
flops = ghost_flops * 2 * in->GhostFace()[dslashParam.kernel_type];
break;
case EXTERIOR_KERNEL_ALL:
{
long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
flops = ghost_flops * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY:
{
long long sites = in->VolumeCB();
flops = (2*num_dir*mv_flops + // SU(3) matrix-vector multiplies
(2*num_dir-1)*2*in->Ncolor()*in->Nspin()) * sites; // accumulation
if (x) flops += xpay_flops * sites; // axpy is always on interior
if (dslashParam.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2 * in->GhostFace()[d];
flops -= ghost_flops * ghost_sites;
break;
}
}
return flops;
}
virtual long long bytes() const {
int gauge_bytes_fat = QUDA_RECONSTRUCT_NO * in->Precision();
int gauge_bytes_long = reconstruct * in->Precision();
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0);
int ghost_bytes = 3 * (spinor_bytes + gauge_bytes_long) + (spinor_bytes + gauge_bytes_fat) + spinor_bytes;
int num_dir = 2 * 4; // set to 4 dimensions since we take care of 5-d fermions in derived classes where necessary
long long bytes = 0;
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
bytes = ghost_bytes * 2 * in->GhostFace()[dslashParam.kernel_type];
break;
case EXTERIOR_KERNEL_ALL:
{
long long ghost_sites = 2 * (in->GhostFace()[0]+in->GhostFace()[1]+in->GhostFace()[2]+in->GhostFace()[3]);
bytes = ghost_bytes * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY:
{
long long sites = in->VolumeCB();
bytes = (num_dir*(gauge_bytes_fat + gauge_bytes_long) + // gauge reads
num_dir*2*spinor_bytes + // spinor reads
spinor_bytes)*sites; // spinor write
if (x) bytes += spinor_bytes;
if (dslashParam.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d=0; d<4; d++) if (dslashParam.commDim[d]) ghost_sites += 2*in->GhostFace()[d];
bytes -= ghost_bytes * ghost_sites;
break;
}
}
return bytes;
}
};
#endif // GPU_STAGGERED_DIRAC
#include <dslash_policy.cuh>
void improvedStaggeredDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &fatGauge,
const cudaGaugeField &longGauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->allocateGhostBuffer(3);
#ifdef GPU_STAGGERED_DIRAC
dslashParam.Ls = out->X(4); // use Ls as the number of sources
#ifdef MULTI_GPU
for(int i=0;i < 4; i++){
if(commDimPartitioned(i) && (fatGauge.X()[i] < 6)){
errorQuda("ERROR: partitioned dimension with local size less than 6 is not supported in staggered dslash\n");
}
}
#endif
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
dslashParam.parity = parity;
dslashParam.gauge_stride = fatGauge.Stride();
dslashParam.long_gauge_stride = longGauge.Stride();
dslashParam.fat_link_max = fatGauge.LinkMax();
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *fatGauge0, *fatGauge1;
void* longGauge0, *longGauge1;
bindFatGaugeTex(fatGauge, parity, &fatGauge0, &fatGauge1);
bindLongGaugeTex(longGauge, parity, &longGauge0, &longGauge1);
void *longPhase0 = (char*)longGauge0 + longGauge.PhaseOffset();
void *longPhase1 = (char*)longGauge1 + longGauge.PhaseOffset();
if (in->Precision() != fatGauge.Precision() || in->Precision() != longGauge.Precision()){
errorQuda("Mixing gauge and spinor precision not supported"
"(precision=%d, fatlinkGauge.precision=%d, longGauge.precision=%d",
in->Precision(), fatGauge.Precision(), longGauge.Precision());
}
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new StaggeredDslashCuda<double2, double2, double2, double>
(out, (double2*)fatGauge0, (double2*)fatGauge1,
(double2*)longGauge0, (double2*)longGauge1,
(double*)longPhase0, (double*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new StaggeredDslashCuda<float2, float2, float4, float>
(out, (float2*)fatGauge0, (float2*)fatGauge1,
(float4*)longGauge0, (float4*)longGauge1,
(float*)longPhase0, (float*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new StaggeredDslashCuda<short2, short2, short4, short>
(out, (short2*)fatGauge0, (short2*)fatGauge1,
(short4*)longGauge0, (short4*)longGauge1,
(short*)longPhase0, (short*)longPhase1,
longGauge.Reconstruct(), in, x, k, dagger);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
#ifndef GPU_COMMS
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile);
dslash_policy.apply(0);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile);
delete dslashImp;
#endif
delete dslash;
unbindFatGaugeTex(fatGauge);
unbindLongGaugeTex(longGauge);
checkCudaError();
#else
errorQuda("Staggered dslash has not been built");
#endif // GPU_STAGGERED_DIRAC
}
}
|
fb7625cdc3af5726f887e4ab27b455cffe7554c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
// modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
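// Grid-stride loop: each thread starts at its global index and advances by the total
// number of launched threads, so a bounded grid can cover an arbitrarily large n.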
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
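// data_col is laid out as [channels * kernel_h * kernel_w][batch][height_col][width_col]
// (inferred from the pointer arithmetic below and the per-(i,j) stride at the end of the loop).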
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
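// Scatter the gradient back to the input image: the bilinear sample only touches a 2x2
// neighbourhood, and the +/-2 window combined with the |distance| < 1 test below
// conservatively covers it while respecting the image borders.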
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_col2im: %s\n", hipGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err));
}
} | fb7625cdc3af5726f887e4ab27b455cffe7554c0.cu | /*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
// modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
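// bilinear sampling helper: reads the four integer-grid neighbours of the fractional location (h, w),
// skipping neighbours that fall outside the image, and blends them with the standard bilinear weights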
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
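// im2col kernel for deformable convolution: each thread handles one (input-channel, batch, output-pixel)
// tuple, loops over the kernel taps, reads the learned offset for each tap, and writes the bilinearly
// sampled input value into the column buffer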
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
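// check for launch errors; added here to mirror the error handling in the other im2col/col2im wrappers above
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im_coord: %s\n", cudaGetErrorString(err));
}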
}
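// the dmcn_* device helpers below are verbatim duplicates of the three helpers above; they are used by the
// modulated (mask-weighted) kernels that follow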
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
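// modulated variant of the im2col kernel: same indexing as deformable_im2col_gpu_kernel, except each
// sampled value is additionally scaled by the per-tap modulation mask read from data_mask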
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
} |
1eb9f3b42eff39a2b493898164802b9bf5112db7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "mmm_gpu_execution.h"
#include "../../test-case/mmm/mmm_structure.h"
#include "../../runtime/structure.h"
#include "../../gpu-utils/gpu_constant.h"
#include "../../gpu-offloader/gpu_code_executor.h"
#include "../../gpu-offloader/lpu_parts_tracking.h"
#include "../../utils/list.h"
#include "../../gpu-utils/gpu_partition.h"
#include "../../gpu-utils/gpu_utils.h"
//----------------------------------------------------- MMM Batch LPU Controller -----------------------------------------------------/
MMMLpuBatchController::MMMLpuBatchController(int lpuCountThreshold, long memLimit) : LpuBatchController() {
List<const char*> *propertyNames = new List<const char*>;
propertyNames->Append("a");
propertyNames->Append("b");
propertyNames->Append("c");
List<const char*> *toBeModifiedProperties = new List<const char*>;
toBeModifiedProperties->Append("c");
setBufferManager(new LpuDataBufferManager(propertyNames));
initialize(lpuCountThreshold, memLimit, propertyNames, toBeModifiedProperties);
}
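// returns the number of bytes of GPU buffer space this LPU would add to the current batch; parts that the
// tracker has already staged under the same part id are not counted again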
int MMMLpuBatchController::calculateLpuMemoryRequirement(LPU *lpu) {
mmm::MMMLpu *mmmLpu = (mmm::MMMLpu *) lpu;
int size = 0;
if (!dataPartTracker->isAlreadyIncluded(mmmLpu->aPartId, "a")) {
size += (mmmLpu->aPartDims[0].storage.getLength()
* mmmLpu->aPartDims[1].storage.getLength()) * sizeof(double);
}
if (!dataPartTracker->isAlreadyIncluded(mmmLpu->bPartId, "b")) {
size += (mmmLpu->bPartDims[0].storage.getLength()
* mmmLpu->bPartDims[1].storage.getLength()) * sizeof(double);
}
// check the c part against its own id (cPartId), matching how it is registered in addLpuToTheCurrentBatch
if (!dataPartTracker->isAlreadyIncluded(mmmLpu->cPartId, "c")) {
size += (mmmLpu->cPartDims[0].storage.getLength()
* mmmLpu->cPartDims[1].storage.getLength()) * sizeof(double);
}
return size;
}
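// wraps the a, b, and c blocks of the LPU as LpuDataParts and registers them with the tracker; a part that
// was already staged for an earlier LPU in the batch is rejected by the tracker and freed here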
void MMMLpuBatchController::addLpuToTheCurrentBatch(LPU *lpu) {
mmm::MMMLpu *mmmLpu = (mmm::MMMLpu *) lpu;
LpuDataPart *aPart = new LpuDataPart(2,
mmmLpu->aPartDims, mmmLpu->a, sizeof(double), mmmLpu->aPartId);
bool notRedundant = dataPartTracker->addDataPart(aPart, "a");
if (!notRedundant) {
delete aPart;
}
LpuDataPart *bPart = new LpuDataPart(2,
mmmLpu->bPartDims, mmmLpu->b, sizeof(double), mmmLpu->bPartId);
notRedundant = dataPartTracker->addDataPart(bPart, "b");
if (!notRedundant) {
delete bPart;
}
LpuDataPart *cPart = new LpuDataPart(2,
mmmLpu->cPartDims, mmmLpu->c, sizeof(double), mmmLpu->cPartId);
notRedundant = dataPartTracker->addDataPart(cPart, "c");
if (!notRedundant) {
delete cPart;
}
LpuBatchController::addLpuToTheCurrentBatch(lpu);
}
//------------------------------------------------------ Offloading GPU Kernels ------------------------------------------------------/
__global__ void matrixMultiplyKernel(MMMLpuBatchRange batchRange,
mmm::Partition partition,
mmm::ArrayMetadata arrayMetadata,
mmm::TaskGlobals *taskGlobals,
mmm::ThreadLocals *threadLocals,
GpuBufferReferences aBuffers,
GpuBufferReferences bBuffers,
GpuBufferReferences cBuffers) {
/*----------------------------------------------------------------------------------------------------------------------------
Space A: Top-most User Defined Space
----------------------------------------------------------------------------------------------------------------------------*/
// before we can do anything in the kernel, we need to determine the thread, warp, and sm IDs of the thread
// executing the kernel code
int smId = blockIdx.x;
int warpId = threadIdx.x / WARP_SIZE;
int threadId = threadIdx.x % WARP_SIZE;
// variables for holding the data part references for the top-space LPU
__shared__ double *a, *b, *c;
// variables for tracking storage and partition dimensions of the top space LPU's data parts
__shared__ int aSRanges[2][2], bSRanges[2][2], cSRanges[2][2];
__shared__ int aPRanges[2][2], bPRanges[2][2], cPRanges[2][2];
// SMs stride over different indexes to get different LPUs to operate on
Range lpuIdRange = batchRange.lpuIdRange;
for (int linearId = lpuIdRange.min + smId; linearId <= lpuIdRange.max; linearId += BLOCK_COUNT) {
// all threads should synchronize here to prevent the LPU metadata writer threads from overwriting the old
// values before the other threads are done using those values
__syncthreads();
// point the a, b, c matrix references to the memory addresses where corresponding data parts for the
// current LPUs starts
if (warpId == 0 && threadId == 0) {
//------------------------------------------------------------- retrieve a and its dimensions
__shared__ int lpuIndex, aIndex, aStartsAt, aDimRangeStart;
lpuIndex = linearId - lpuIdRange.min;
aIndex = aBuffers.partIndexBuffer[lpuIndex];
aStartsAt = aBuffers.partBeginningBuffer[aIndex];
a = (double *) (aBuffers.dataBuffer + aStartsAt);
aDimRangeStart = aIndex * 2 * 2 * 2;
aSRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart];
aSRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 1];
aSRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 2];
aSRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 3];
aPRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart + 4];
aPRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 5];
aPRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 6];
aPRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 7];
//------------------------------------------------------------- retrieve b and its dimensions
__shared__ int bIndex, bStartsAt, bDimRangeStart;
bIndex = bBuffers.partIndexBuffer[lpuIndex];
bStartsAt = bBuffers.partBeginningBuffer[bIndex];
b = (double *) (bBuffers.dataBuffer + bStartsAt);
bDimRangeStart = bIndex * 2 * 2 * 2;
bSRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart];
bSRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 1];
bSRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 2];
bSRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 3];
bPRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart + 4];
bPRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 5];
bPRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 6];
bPRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 7];
//------------------------------------------------------------- retrieve c and its dimensions
__shared__ int cIndex, cStartsAt, cDimRangeStart;
cIndex = cBuffers.partIndexBuffer[lpuIndex];
cStartsAt = cBuffers.partBeginningBuffer[cIndex];
c = (double *) (cBuffers.dataBuffer + cStartsAt);
cDimRangeStart = cIndex * 2 * 2 * 2;
cSRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart];
cSRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 1];
cSRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 2];
cSRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 3];
cPRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart + 4];
cPRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 5];
cPRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 6];
cPRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 7];
}
__syncthreads();
/*--------------------------------------------------------------------------------------------------------------------
Space A-Sub: Compiler Generated Space for Subpartition
--------------------------------------------------------------------------------------------------------------------*/
// once we have the storage and partition dimensions of data structure at the top-level space's LPU
// we can determine the sub-partition space's Lpu count
int subpartitionCount = block_size_part_count(bPRanges[0], partition.blockSize);
__shared__ int aPSubRanges[2][2], bPSubRanges[2][2];
// the subpartitioned LPUs are processed one by one; remember that LPUs of sub-partitioned LPSes are
// not supposed to be distributed
for (int subLpu = 0; subLpu < subpartitionCount; subLpu++) {
// the first warp should not advance to the next sub-partition when other warps have not yet
// finished their computation for the current sub-partition
__syncthreads();
if (warpId == 0 && threadId == 0) {
// first we need to determine the partition dimension ranges of the two sub-
// partitioned data structures, which are matrix A and B
__shared__ int lpuId;
lpuId = subLpu;
aPSubRanges[0][0] = aPRanges[0][0];
aPSubRanges[0][1] = aPRanges[0][1];
block_size_part_range(aPSubRanges[1], aPRanges[1],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
block_size_part_range(bPSubRanges[0], bPRanges[0],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
bPSubRanges[1][0] = bPRanges[1][0];
bPSubRanges[1][1] = bPRanges[1][1];
}
__syncthreads();
/*------------------------------------------------------------------------------------------------------------
// here we should load sub-sections of A and B from the GPU card memory to the local memory
// what about C? Or should we directly perform all computation on the card memory and rely
// on the hardware's caching mechanism to do the global and shared memory interactions?
// In the multicore and segmented memory architecture cases the matrix-matrix multiplication
// code starts here. In the GPU, the existing partition scheme will result in only one warp
// within an SM doing computation for the user code. Rather the user should have the
// computation distributed to multiple warps for different smaller sub-sections of the
// block of matrix C using another lower level LPS
--------------------------------------------------------------------------------------------------------------
Space B: Lowest User Defined Space
------------------------------------------------------------------------------------------------------------*/
// Space B LPUs will be distributed among the warps; so the parts' dimension configuration
// should be different for different warps and we cannot have a single shared object per
// part information as we have done in the previous LPSes. Rather, we will have a shared
// memory panel having 1 entry per warp to hold the relevant part's dimension configuration.
__shared__ int aSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int bSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int cSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int spaceBLpuCount1, spaceBLpuCount2, spaceBLpuCount;
if (warpId == 0 && threadId == 0) {
spaceBLpuCount1 = block_size_part_count(cPRanges[0], 1);
spaceBLpuCount2 = block_size_part_count(cPRanges[1], partition.blockSize);
spaceBLpuCount = spaceBLpuCount1 * spaceBLpuCount2;
}
__syncthreads();
// distribute the Space B LPUs among the warps
__shared__ int spaceBLpuId[WARP_COUNT][2];
for (int spaceBLpu = warpId; spaceBLpu < spaceBLpuCount; spaceBLpu += WARP_COUNT) {
if (threadId == 0) {
// construct the 2 dimensional LPU ID from the linear LPU Id
spaceBLpuId[warpId][0] = spaceBLpu / spaceBLpuCount2;
spaceBLpuId[warpId][1] = spaceBLpu % spaceBLpuCount2;
//---------------------------------------------------- A part dimensions
block_size_part_range(aSpaceBPRanges[warpId][0], aPSubRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
aSpaceBPRanges[warpId][1][0] = aPSubRanges[1][0];
aSpaceBPRanges[warpId][1][1] = aPSubRanges[1][1];
//---------------------------------------------------- B part dimensions
bSpaceBPRanges[warpId][0][0] = bPSubRanges[0][0];
bSpaceBPRanges[warpId][0][1] = bPSubRanges[0][1];
block_size_part_range(bSpaceBPRanges[warpId][1], bPSubRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
//---------------------------------------------------- C part dimensions
block_size_part_range(cSpaceBPRanges[warpId][0], cPRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
block_size_part_range(cSpaceBPRanges[warpId][1], cPRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
}
// there is no syncthread operation needed here as updates done by a thread in a
// warp are visible to all other threads in that warp
/*----------------------------------------------------------------------------------------------------
Translated Computation Stage
----------------------------------------------------------------------------------------------------*/
// the compute stage for IT matrix-matrix multiplication looks like the following
// do {
// c[i][j] = c[i][j] + a[i][k] * b[k][j]
// } for i, j in c; k in a
// In each warp we have 32 threads performing the same instruction in a lock-step
// fashion. If we can make the threads work on different pieces of data then we
// can have a vectorized translation of the IT for loop without any additional data
// synchronization among the threads. A simple static analysis of the code block
// should detect that i and j indices both appeared on the left hand side of the
// enclosed statement but not the k index. So we can let different threads work on
// different i or j values. In general, we should avoid varying both indices at the
// same time to reduce memory bank conflicts.
// But how do we select the index for distribution among threads that has the best
// potential for coalescing global memory and reducing shared memory accesses? The
// selection also needs to be cautious about compromising opportunities for parallelism.
// The initial solution for this is incorporated in the GPU utility library that, given
// a set of ranges to iterate, provides loop starting indexes and step sizes.
int iterableRanges[4];
iterableRanges[0] = cSpaceBPRanges[warpId][0][0];
iterableRanges[1] = cSpaceBPRanges[warpId][0][1];
iterableRanges[2] = cSpaceBPRanges[warpId][1][0];
iterableRanges[3] = cSpaceBPRanges[warpId][1][1];
int indexesAndSteps[4];
determineLoopIndexesAndSteps(2, threadId, iterableRanges, indexesAndSteps);
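// indexesAndSteps holds a (start index, step) pair per distributed dimension: [0]/[1] for the row loop
// and [2]/[3] for the column loop, so the threads of the warp cover different portions of the C block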
// iterate over the rows
int iStart = indexesAndSteps[0];
int iEnd = iterableRanges[1];
int iStep = indexesAndSteps[1];
for (int i = iStart; i <= iEnd; i += iStep) {
int c_i = i - cSRanges[0][0];
int a_i = i - aSRanges[0][0];
// iterate over the columns
int jStart = indexesAndSteps[2];
int jEnd = iterableRanges[3];
int jStep = indexesAndSteps[3];
for (int j = jStart; j <= jEnd; j+= jStep) {
int c_j = j - cSRanges[1][0];
int b_j = j - bSRanges[1][0];
// iterate over the common dimension
int kStart = aSpaceBPRanges[warpId][1][0];
int kEnd = aSpaceBPRanges[warpId][1][1];
for (int k = kStart; k <= kEnd; k++) {
int a_k = k - aSRanges[1][0];
int b_k = k - bSRanges[0][0];
int cIndex = c_i * (cSRanges[1][1] - cSRanges[1][0] + 1) + c_j;
int aIndex = a_i * (aSRanges[1][1] - aSRanges[1][0] + 1) + a_k;
int bIndex = b_k * (bSRanges[1][1] - bSRanges[1][0] + 1) + b_j;
c[cIndex] += a[aIndex] * b[bIndex];
}
}
}
}
}
}
}
__global__ void matrixMultiplyKernelSharedMem(MMMLpuBatchRange batchRange,
mmm::Partition partition,
mmm::ArrayMetadata arrayMetadata,
mmm::TaskGlobals *taskGlobals,
mmm::ThreadLocals *threadLocals,
GpuBufferReferences aBuffers,
GpuBufferReferences bBuffers,
GpuBufferReferences cBuffers) {
// declaration of the shared dynamic memory panel that will be used to create SM-local data parts and an index
// that keeps track of the next free location in the memory panel
extern __shared__ char MEMORY_PANEL[];
__shared__ int FREE_MEMORY_INDEX;
FREE_MEMORY_INDEX = 0;
/*----------------------------------------------------------------------------------------------------------------------------
Space A: Top-most User Defined Space
----------------------------------------------------------------------------------------------------------------------------*/
// before we can do anything in the kernel, we need to determine the thread, warp, and sm IDs of the thread
// executing the kernel code
int smId = blockIdx.x;
int warpId = threadIdx.x / WARP_SIZE;
int threadId = threadIdx.x % WARP_SIZE;
// variables for holding the data part references for the top-space LPU
__shared__ double *a, *b, *c;
// variables for tracking storage and partition dimensions of the top space LPU's data parts
__shared__ int aSRanges[2][2], bSRanges[2][2], cSRanges[2][2];
__shared__ int aPRanges[2][2], bPRanges[2][2], cPRanges[2][2];
// SMs stride over different indexes to get different LPUs to operate on
Range lpuIdRange = batchRange.lpuIdRange;
for (int linearId = lpuIdRange.min + smId; linearId <= lpuIdRange.max; linearId += BLOCK_COUNT) {
// all threads should synchronize here to prevent the LPU metadata writer threads from overwriting the old
// values before the other threads are done using those values
__syncthreads();
// point the a, b, c matrix references to the memory addresses where corresponding data parts for the
// current LPUs starts
if (warpId == 0 && threadId == 0) {
//------------------------------------------------------------- retrieve a and its dimensions
__shared__ int lpuIndex, aIndex, aStartsAt, aDimRangeStart;
lpuIndex = linearId - lpuIdRange.min;
aIndex = aBuffers.partIndexBuffer[lpuIndex];
aStartsAt = aBuffers.partBeginningBuffer[aIndex];
a = (double *) (aBuffers.dataBuffer + aStartsAt);
aDimRangeStart = aIndex * 2 * 2 * 2;
aSRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart];
aSRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 1];
aSRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 2];
aSRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 3];
aPRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart + 4];
aPRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 5];
aPRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 6];
aPRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 7];
//------------------------------------------------------------- retrieve b and its dimensions
__shared__ int bIndex, bStartsAt, bDimRangeStart;
bIndex = bBuffers.partIndexBuffer[lpuIndex];
bStartsAt = bBuffers.partBeginningBuffer[bIndex];
b = (double *) (bBuffers.dataBuffer + bStartsAt);
bDimRangeStart = bIndex * 2 * 2 * 2;
bSRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart];
bSRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 1];
bSRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 2];
bSRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 3];
bPRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart + 4];
bPRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 5];
bPRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 6];
bPRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 7];
//------------------------------------------------------------- retrieve c and its dimensions
__shared__ int cIndex, cStartsAt, cDimRangeStart;
cIndex = cBuffers.partIndexBuffer[lpuIndex];
cStartsAt = cBuffers.partBeginningBuffer[cIndex];
c = (double *) (cBuffers.dataBuffer + cStartsAt);
cDimRangeStart = cIndex * 2 * 2 * 2;
cSRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart];
cSRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 1];
cSRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 2];
cSRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 3];
cPRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart + 4];
cPRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 5];
cPRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 6];
cPRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 7];
}
__syncthreads();
/*--------------------------------------------------------------------------------------------------------------------
Copying C into shared memory
--------------------------------------------------------------------------------------------------------------------*/
// allocate enough space for C part in the dynamic shared memory panel
__shared__ double *c_shared;
if (warpId == 0 && threadId == 0) {
c_shared = (double *) MEMORY_PANEL;
//c_shared = (double *) MEMORY_PANEL[FREE_MEMORY_INDEX];
int cSize = (cPRanges[0][1] - cPRanges[0][0] + 1)
* (cPRanges[1][1] - cPRanges[1][0] + 1) * sizeof(double);
FREE_MEMORY_INDEX += (cSize / MEMORY_PANNEL_ALIGNMENT_BOUNDARY)
* MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
if (cSize % MEMORY_PANNEL_ALIGNMENT_BOUNDARY != 0) {
FREE_MEMORY_INDEX += MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
}
}
__syncthreads();
// cooperatively load the current values of C from card memory part to the shared part
for (int i = cPRanges[0][0] + warpId; i <= cPRanges[0][1]; i += WARP_COUNT) {
for (int j = cPRanges[1][0] + threadId; j <= cPRanges[1][1]; j += WARP_SIZE) {
c_shared[(i - cPRanges[0][0])
* (cPRanges[1][1] - cPRanges[1][0] + 1)
+ (j - cPRanges[1][0])]
= c[(i - cSRanges[0][0])
* (cSRanges[1][1] - cSRanges[1][0] + 1)
+ (j - cSRanges[1][0])];
}
}
/*--------------------------------------------------------------------------------------------------------------------
Space A-Sub: Compiler Generated Space for Subpartition
--------------------------------------------------------------------------------------------------------------------*/
// once we have the storage and partition dimensions of data structure at the top-level space's LPU
// we can determine the sub-partition space's Lpu count
int subpartitionCount = block_size_part_count(bPRanges[0], partition.blockSize);
__shared__ int aPSubRanges[2][2], bPSubRanges[2][2];
// the subpartitioned LPUs are processed one by one; remember that LPUs of sub-partitioned LPSes are
// not supposed to be distributed
for (int subLpu = 0; subLpu < subpartitionCount; subLpu++) {
// the first warp should not advance to the next sub-partition when other warps have not yet
// finished their computation for the current sub-partition
__syncthreads();
if (warpId == 0 && threadId == 0) {
// first we need to determine the partition dimension ranges of the two sub-
// partitioned data structures, which are matrix A and B
__shared__ int lpuId;
lpuId = subLpu;
aPSubRanges[0][0] = aPRanges[0][0];
aPSubRanges[0][1] = aPRanges[0][1];
block_size_part_range(aPSubRanges[1], aPRanges[1],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
block_size_part_range(bPSubRanges[0], bPRanges[0],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
bPSubRanges[1][0] = bPRanges[1][0];
bPSubRanges[1][1] = bPRanges[1][1];
}
__syncthreads();
/*------------------------------------------------------------------------------------------------------------
Copying A and B into Shared Memory
------------------------------------------------------------------------------------------------------------*/
// allocate enough space for the two shared variables in the dynamic shared memory panel
__shared__ double *a_shared, *b_shared;
if (warpId == 0 && threadId == 0) {
a_shared = (double *) (MEMORY_PANEL + 32 * 32 * sizeof(double));
//a_shared = (double *) MEMORY_PANEL[FREE_MEMORY_INDEX];
int aSize = (aPSubRanges[0][1] - aPSubRanges[0][0] + 1)
* (aPSubRanges[1][1] - aPSubRanges[1][0] + 1) * sizeof(double);
FREE_MEMORY_INDEX += (aSize / MEMORY_PANNEL_ALIGNMENT_BOUNDARY)
* MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
if (aSize % MEMORY_PANNEL_ALIGNMENT_BOUNDARY != 0) {
FREE_MEMORY_INDEX += MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
}
b_shared = (double *) (MEMORY_PANEL + 2 * 32 * 32 * sizeof(double));
//b_shared = (double *) MEMORY_PANEL[FREE_MEMORY_INDEX];
int bSize = (bPSubRanges[0][1] - bPSubRanges[0][0] + 1)
* (bPSubRanges[1][1] - bPSubRanges[1][0] + 1) * sizeof(double);
FREE_MEMORY_INDEX += (bSize / MEMORY_PANNEL_ALIGNMENT_BOUNDARY)
* MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
if (bSize % MEMORY_PANNEL_ALIGNMENT_BOUNDARY != 0) {
FREE_MEMORY_INDEX += MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
}
}
__syncthreads();
// copy data from the global memory to the SM memory
for (int i = aPSubRanges[0][0] + warpId; i <= aPSubRanges[0][1]; i += WARP_COUNT) {
for (int j = aPSubRanges[1][0] + threadId;
j <= aPSubRanges[1][1]; j += WARP_SIZE) {
a_shared[(i - aPSubRanges[0][0])
* (aPSubRanges[1][1] - aPSubRanges[1][0] + 1)
+ (j - aPSubRanges[1][0])]
= a[(i - aSRanges[0][0])
* (aSRanges[1][1] - aSRanges[1][0] + 1)
+ (j - aSRanges[1][0])];
}
}
for (int i = bPSubRanges[0][0] + warpId; i <= bPSubRanges[0][1]; i += WARP_COUNT) {
for (int j = bPSubRanges[1][0] + threadId;
j <= bPSubRanges[1][1]; j += WARP_SIZE) {
b_shared[(i - bPSubRanges[0][0])
* (bPSubRanges[1][1] - bPSubRanges[1][0] + 1)
+ (j - bPSubRanges[1][0])]
= b[(i - bSRanges[0][0])
* (bSRanges[1][1] - bSRanges[1][0] + 1)
+ (j - bSRanges[1][0])];
}
}
__syncthreads();
/*------------------------------------------------------------------------------------------------------------
// here we should load sub-section of A and B from the GPU card memory to the local memory
// what about C? Or should we directly perform all computation on the card memory and rely
// on the hardware's caching mechanism to do the global and shared memory interactions?
// In the multicore and segmented memory architecture cases the matrix-matrix multiplication
// code starts here. In the GPU, the existing partition scheme will result in only one warp
// within an SM doing computation for the user code. Rather, the user should have the
// computation distributed to multiple warps for different smaller sub-sections of the
// block of matrix C using another lower level LPS
--------------------------------------------------------------------------------------------------------------
Space B: Lowest User Defined Space
------------------------------------------------------------------------------------------------------------*/
// Space B LPUs will be distributed among the warps; so the parts' dimension configuration
// should be different for different warps and we cannot have a single shared object per
// part information as we have done in the previous LPSes. Rather, we will have a shared
// memory panel having one entry per warp to hold the relevant part's dimension configuration.
__shared__ int aSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int bSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int cSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int spaceBLpuCount1, spaceBLpuCount2, spaceBLpuCount;
if (warpId == 0 && threadId == 0) {
spaceBLpuCount1 = block_size_part_count(cPRanges[0], 1);
spaceBLpuCount2 = block_size_part_count(cPRanges[1], partition.blockSize);
spaceBLpuCount = spaceBLpuCount1 * spaceBLpuCount2;
}
__syncthreads();
// distribute the Space B LPUs among the warps
__shared__ int spaceBLpuId[WARP_COUNT][2];
for (int spaceBLpu = warpId; spaceBLpu < spaceBLpuCount; spaceBLpu += WARP_COUNT) {
if (threadId == 0) {
// construct the 2 dimensional LPU ID from the linear LPU Id
spaceBLpuId[warpId][0] = spaceBLpu / spaceBLpuCount2;
spaceBLpuId[warpId][1] = spaceBLpu % spaceBLpuCount2;
//---------------------------------------------------- A part dimensions
block_size_part_range(aSpaceBPRanges[warpId][0], aPSubRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
aSpaceBPRanges[warpId][1][0] = aPSubRanges[1][0];
aSpaceBPRanges[warpId][1][1] = aPSubRanges[1][1];
//---------------------------------------------------- B part dimensions
bSpaceBPRanges[warpId][0][0] = bPSubRanges[0][0];
bSpaceBPRanges[warpId][0][1] = bPSubRanges[0][1];
block_size_part_range(bSpaceBPRanges[warpId][1], bPSubRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
//---------------------------------------------------- C part dimensions
block_size_part_range(cSpaceBPRanges[warpId][0], cPRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
block_size_part_range(cSpaceBPRanges[warpId][1], cPRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
}
// there is no syncthread operation needed here as updates done by a thread in a
// warp are visible to all other threads in that warp
/*----------------------------------------------------------------------------------------------------
Translated Computation Stage
----------------------------------------------------------------------------------------------------*/
// the compute stage for IT matrix-matrix multiplication looks like the following
// do {
// c[i][j] = c[i][j] + a[i][k] * b[k][j]
// } for i, j in c; k in a
// In each warp we have 32 threads performing the same instruction in a lock-step
// fashion. If we can make the threads work on different pieces of data then we
// can have a vectorized translation of the IT for loop without any additional data
// synchronization among the threads. A simple static analysis of the code block
// should detect that i and j indices both appeared on the left hand side of the
// enclosed statement but not the k index. So we can let different threads work on
// different i or j values. In general, we should avoid varying both indices at the
// same time to reduce memory bank conflicts.
// But how do we select the index for distribution among threads that has the best
// potential for coalescing global memory and reducing shared memory accesses? The
// selection also needs to be cautious about compromising opportunities for parallelism.
// The initial solution for this is incorporated in the GPU utility library that, given
// a set of ranges to iterate, provides loop starting indexes and step sizes.
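// determineLoopIndexesAndSteps belongs to the GPU utility library (not shown in this
// file); judging from its use below, it presumably offsets one of the two iterable
// dimensions by threadId with a step of WARP_SIZE and leaves the other dimension to be
// swept sequentially, so the 32 threads of the warp cover disjoint elements of the C part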
int iterableRanges[4];
iterableRanges[0] = cSpaceBPRanges[warpId][0][0];
iterableRanges[1] = cSpaceBPRanges[warpId][0][1];
iterableRanges[2] = cSpaceBPRanges[warpId][1][0];
iterableRanges[3] = cSpaceBPRanges[warpId][1][1];
int indexesAndSteps[4];
determineLoopIndexesAndSteps(2, threadId, iterableRanges, indexesAndSteps);
// iterate over the rows
int iStart = indexesAndSteps[0];
int iEnd = iterableRanges[1];
int iStep = indexesAndSteps[1];
for (int i = iStart; i <= iEnd; i += iStep) {
int c_i = i - cPRanges[0][0];
int a_i = i - aPSubRanges[0][0];
// iterate over the columns
int jStart = indexesAndSteps[2];
int jEnd = iterableRanges[3];
int jStep = indexesAndSteps[3];
for (int j = jStart; j <= jEnd; j+= jStep) {
int c_j = j - cPRanges[1][0];
int b_j = j - bPSubRanges[1][0];
// iterate over the common dimension
int kStart = aSpaceBPRanges[warpId][1][0];
int kEnd = aSpaceBPRanges[warpId][1][1];
for (int k = kStart; k <= kEnd; k++) {
int a_k = k - aPSubRanges[1][0];
int b_k = k - bPSubRanges[0][0];
int cIndex = c_i * (cPRanges[1][1] - cPRanges[1][0] + 1) + c_j;
int aIndex = a_i * (aPSubRanges[1][1] - aPSubRanges[1][0] + 1) + a_k;
int bIndex = b_k * (bPSubRanges[1][1] - bPSubRanges[1][0] + 1) + b_j;
c_shared[cIndex] += a_shared[aIndex] * b_shared[bIndex];
} // k loop ends
} // j loop ends
} // i loop ends
} // Space B LPUs traversal ends
} // Space A sub-partition LPUs traversal ends
/*--------------------------------------------------------------------------------------------------------------------
Copying C Back to the Card Memory
--------------------------------------------------------------------------------------------------------------------*/
__syncthreads();
for (int i = cPRanges[0][0] + warpId; i <= cPRanges[0][1]; i += WARP_COUNT) {
for (int j = cPRanges[1][0] + threadId; j <= cPRanges[1][1]; j += WARP_SIZE) {
c[(i - cSRanges[0][0])
* (cSRanges[1][1] - cSRanges[1][0] + 1)
+ (j - cSRanges[1][0])]
= c_shared[(i - cPRanges[0][0])
* (cPRanges[1][1] - cPRanges[1][0] + 1)
+ (j - cPRanges[1][0])];
}
}
} // Space A LPUs traversal ends
}
//------------------------------------------------------- MMM GPU Code Executor ------------------------------------------------------/
MMMGpuCodeExecutor::MMMGpuCodeExecutor(LpuBatchController *lpuBatchController,
mmm::Partition partition,
mmm::ArrayMetadata arrayMetadata,
mmm::TaskGlobals *taskGlobals,
mmm::ThreadLocals *threadLocals)
: GpuCodeExecutor(lpuBatchController) {
this->partition = partition;
this->arrayMetadata = arrayMetadata;
this->taskGlobalsCpu = taskGlobals;
this->taskGlobalsGpu = NULL;
this->threadLocalsCpu = threadLocals;
this->threadLocalsGpu = NULL;
}
void MMMGpuCodeExecutor::offloadFunction() {
GpuBufferReferences *aBuffers = lpuBatchController->getGpuBufferReferences("a");
GpuBufferReferences *bBuffers = lpuBatchController->getGpuBufferReferences("b");
GpuBufferReferences *cBuffers = lpuBatchController->getGpuBufferReferences("c");
MMMLpuBatchRange batchRange;
batchRange.lpuIdRange = currentBatchLpuRange;
batchRange.lpuCount1 = lpuCount[0];
batchRange.lpuCount2 = lpuCount[1];
int threadsPerBlock = WARP_SIZE * WARP_COUNT;
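// reserve dynamic shared memory for the three tiles (a, b and c) of
// blockSize x blockSize doubles that the kernel stages per thread block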
int shared_memory_size = 3 * partition.blockSize * partition.blockSize * sizeof(double);
hipLaunchKernelGGL(( matrixMultiplyKernelSharedMem) , dim3(BLOCK_COUNT), dim3(threadsPerBlock), shared_memory_size , 0,
batchRange, partition, arrayMetadata,
taskGlobalsGpu, threadLocalsGpu,
*aBuffers, *bBuffers, *cBuffers);
delete aBuffers;
delete bBuffers;
delete cBuffers;
}
void MMMGpuCodeExecutor::initialize() {
GpuCodeExecutor::initialize();
// sizes must be taken from the pointed-to structures, not from the pointer variables
size_t taskGlobalsSize = sizeof(mmm::TaskGlobals);
hipMalloc((void **) &taskGlobalsGpu, taskGlobalsSize);
hipMemcpy(taskGlobalsGpu, taskGlobalsCpu, taskGlobalsSize, hipMemcpyHostToDevice);
size_t threadLocalsSize = sizeof(mmm::ThreadLocals);
hipMalloc((void **) &threadLocalsGpu, threadLocalsSize);
hipMemcpy(threadLocalsGpu, threadLocalsCpu, threadLocalsSize, hipMemcpyHostToDevice);
}
void MMMGpuCodeExecutor::cleanup() {
// again use the structure sizes so the full objects are copied back
size_t taskGlobalsSize = sizeof(mmm::TaskGlobals);
hipMemcpy(taskGlobalsCpu, taskGlobalsGpu, taskGlobalsSize, hipMemcpyDeviceToHost);
size_t threadLocalsSize = sizeof(mmm::ThreadLocals);
hipMemcpy(threadLocalsCpu, threadLocalsGpu, threadLocalsSize, hipMemcpyDeviceToHost);
GpuCodeExecutor::cleanup();
}
| 1eb9f3b42eff39a2b493898164802b9bf5112db7.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "mmm_gpu_execution.h"
#include "../../test-case/mmm/mmm_structure.h"
#include "../../runtime/structure.h"
#include "../../gpu-utils/gpu_constant.h"
#include "../../gpu-offloader/gpu_code_executor.h"
#include "../../gpu-offloader/lpu_parts_tracking.h"
#include "../../utils/list.h"
#include "../../gpu-utils/gpu_partition.h"
#include "../../gpu-utils/gpu_utils.h"
//----------------------------------------------------- MMM Batch LPU Controller -----------------------------------------------------/
MMMLpuBatchController::MMMLpuBatchController(int lpuCountThreshold, long memLimit) : LpuBatchController() {
List<const char*> *propertyNames = new List<const char*>;
propertyNames->Append("a");
propertyNames->Append("b");
propertyNames->Append("c");
List<const char*> *toBeModifiedProperties = new List<const char*>;
toBeModifiedProperties->Append("c");
setBufferManager(new LpuDataBufferManager(propertyNames));
initialize(lpuCountThreshold, memLimit, propertyNames, toBeModifiedProperties);
}
int MMMLpuBatchController::calculateLpuMemoryRequirement(LPU *lpu) {
mmm::MMMLpu *mmmLpu = (mmm::MMMLpu *) lpu;
int size = 0;
if (!dataPartTracker->isAlreadyIncluded(mmmLpu->aPartId, "a")) {
size += (mmmLpu->aPartDims[0].storage.getLength()
* mmmLpu->aPartDims[1].storage.getLength()) * sizeof(double);
}
if (!dataPartTracker->isAlreadyIncluded(mmmLpu->bPartId, "b")) {
size += (mmmLpu->bPartDims[0].storage.getLength()
* mmmLpu->bPartDims[1].storage.getLength()) * sizeof(double);
}
if (!dataPartTracker->isAlreadyIncluded(mmmLpu->cPartId, "c")) {
size += (mmmLpu->cPartDims[0].storage.getLength()
* mmmLpu->cPartDims[1].storage.getLength()) * sizeof(double);
}
return size;
}
void MMMLpuBatchController::addLpuToTheCurrentBatch(LPU *lpu) {
mmm::MMMLpu *mmmLpu = (mmm::MMMLpu *) lpu;
LpuDataPart *aPart = new LpuDataPart(2,
mmmLpu->aPartDims, mmmLpu->a, sizeof(double), mmmLpu->aPartId);
bool notRedundant = dataPartTracker->addDataPart(aPart, "a");
if (!notRedundant) {
delete aPart;
}
LpuDataPart *bPart = new LpuDataPart(2,
mmmLpu->bPartDims, mmmLpu->b, sizeof(double), mmmLpu->bPartId);
notRedundant = dataPartTracker->addDataPart(bPart, "b");
if (!notRedundant) {
delete bPart;
}
LpuDataPart *cPart = new LpuDataPart(2,
mmmLpu->cPartDims, mmmLpu->c, sizeof(double), mmmLpu->cPartId);
notRedundant = dataPartTracker->addDataPart(cPart, "c");
if (!notRedundant) {
delete cPart;
}
LpuBatchController::addLpuToTheCurrentBatch(lpu);
}
//------------------------------------------------------ Offloading GPU Kernels ------------------------------------------------------/
__global__ void matrixMultiplyKernel(MMMLpuBatchRange batchRange,
mmm::Partition partition,
mmm::ArrayMetadata arrayMetadata,
mmm::TaskGlobals *taskGlobals,
mmm::ThreadLocals *threadLocals,
GpuBufferReferences aBuffers,
GpuBufferReferences bBuffers,
GpuBufferReferences cBuffers) {
/*----------------------------------------------------------------------------------------------------------------------------
Space A: Top-most User Defined Space
----------------------------------------------------------------------------------------------------------------------------*/
// before we can do anything in the kernel, we need to determine the thread, warp, and sm IDs of the thread
// executing the kernel code
int smId = blockIdx.x;
int warpId = threadIdx.x / WARP_SIZE;
int threadId = threadIdx.x % WARP_SIZE;
// variables for holding the data part references for the top-space LPU
__shared__ double *a, *b, *c;
// variables for tracking storage and partition dimensions of the top space LPU's data parts
__shared__ int aSRanges[2][2], bSRanges[2][2], cSRanges[2][2];
__shared__ int aPRanges[2][2], bPRanges[2][2], cPRanges[2][2];
// SMs stride over different indexes to get different LPUs to operate on
Range lpuIdRange = batchRange.lpuIdRange;
for (int linearId = lpuIdRange.min + smId; linearId <= lpuIdRange.max; linearId += BLOCK_COUNT) {
// all threads should synchronize here to prevent the LPU metadata writer threads from overwriting the old
// values before the other threads are done using those values
__syncthreads();
// point the a, b, c matrix references to the memory addresses where corresponding data parts for the
// current LPUs starts
if (warpId == 0 && threadId == 0) {
//------------------------------------------------------------- retrieve a and its dimensions
__shared__ int lpuIndex, aIndex, aStartsAt, aDimRangeStart;
lpuIndex = linearId - lpuIdRange.min;
aIndex = aBuffers.partIndexBuffer[lpuIndex];
aStartsAt = aBuffers.partBeginningBuffer[aIndex];
a = (double *) (aBuffers.dataBuffer + aStartsAt);
aDimRangeStart = aIndex * 2 * 2 * 2;
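// each part stores 2 (dimensions) x 2 (storage + partition range) x 2 (min, max) = 8
// integers in partRangeBuffer: the four storage-range values first, then the four
// partition-range values, which the reads below unpack in that order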
aSRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart];
aSRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 1];
aSRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 2];
aSRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 3];
aPRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart + 4];
aPRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 5];
aPRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 6];
aPRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 7];
//------------------------------------------------------------- retrieve b and its dimensions
__shared__ int bIndex, bStartsAt, bDimRangeStart;
bIndex = bBuffers.partIndexBuffer[lpuIndex];
bStartsAt = bBuffers.partBeginningBuffer[bIndex];
b = (double *) (bBuffers.dataBuffer + bStartsAt);
bDimRangeStart = bIndex * 2 * 2 * 2;
bSRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart];
bSRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 1];
bSRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 2];
bSRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 3];
bPRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart + 4];
bPRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 5];
bPRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 6];
bPRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 7];
//------------------------------------------------------------- retrieve c and its dimensions
__shared__ int cIndex, cStartsAt, cDimRangeStart;
cIndex = cBuffers.partIndexBuffer[lpuIndex];
cStartsAt = cBuffers.partBeginningBuffer[cIndex];
c = (double *) (cBuffers.dataBuffer + cStartsAt);
cDimRangeStart = cIndex * 2 * 2 * 2;
cSRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart];
cSRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 1];
cSRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 2];
cSRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 3];
cPRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart + 4];
cPRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 5];
cPRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 6];
cPRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 7];
}
__syncthreads();
/*--------------------------------------------------------------------------------------------------------------------
Space A-Sub: Compiler Generated Space for Subpartition
--------------------------------------------------------------------------------------------------------------------*/
// once we have the storage and partition dimensions of data structure at the top-level space's LPU
// we can determine the sub-partition space's Lpu count
int subpartitionCount = block_size_part_count(bPRanges[0], partition.blockSize);
__shared__ int aPSubRanges[2][2], bPSubRanges[2][2];
// the subpartitioned LPUs are processed one by one; remember that LPUs of sub-partitioned LPSes are
// not supposed to be distributed
for (int subLpu = 0; subLpu < subpartitionCount; subLpu++) {
// the first warp should not advance to the next sub-partition when other warps have not yet
// finished their computation for the current sub-partition
__syncthreads();
if (warpId == 0 && threadId == 0) {
// first we need to determine the partition dimension ranges of the two sub-
// partitioned data structures, which are matrix A and B
__shared__ int lpuId;
lpuId = subLpu;
aPSubRanges[0][0] = aPRanges[0][0];
aPSubRanges[0][1] = aPRanges[0][1];
block_size_part_range(aPSubRanges[1], aPRanges[1],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
block_size_part_range(bPSubRanges[0], bPRanges[0],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
bPSubRanges[1][0] = bPRanges[1][0];
bPSubRanges[1][1] = bPRanges[1][1];
}
__syncthreads();
/*------------------------------------------------------------------------------------------------------------
// here we should load sub-section of A and B from the GPU card memory to the local memory
// what about C? Or should we directly perform all computation on the card memory and rely
// on the hardware's caching mechanism to do the global and shared memory interactions?
// In the multicore and segmented memory architecture cases the matrix-matrix multiplication
// code starts here. In the GPU, the existing partition scheme will result in only one warp
// within an SM doing computation for the user code. Rather, the user should have the
// computation distributed to multiple warps for different smaller sub-sections of the
// block of matrix C using another lower level LPS
--------------------------------------------------------------------------------------------------------------
Space B: Lowest User Defined Space
------------------------------------------------------------------------------------------------------------*/
// Space B LPUs will be distributed among the warps; so the parts' dimension configuration
// should be different for different warps and we cannot have a single shared object per
// part information as we have done in the previous LPSes. Rather, we will have a shared
// memory panel having one entry per warp to hold the relevant part's dimension configuration.
__shared__ int aSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int bSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int cSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int spaceBLpuCount1, spaceBLpuCount2, spaceBLpuCount;
if (warpId == 0 && threadId == 0) {
spaceBLpuCount1 = block_size_part_count(cPRanges[0], 1);
spaceBLpuCount2 = block_size_part_count(cPRanges[1], partition.blockSize);
spaceBLpuCount = spaceBLpuCount1 * spaceBLpuCount2;
}
__syncthreads();
// distribute the Space B LPUs among the warps
__shared__ int spaceBLpuId[WARP_COUNT][2];
for (int spaceBLpu = warpId; spaceBLpu < spaceBLpuCount; spaceBLpu += WARP_COUNT) {
if (threadId == 0) {
// construct the 2 dimensional LPU ID from the linear LPU Id
spaceBLpuId[warpId][0] = spaceBLpu / spaceBLpuCount2;
spaceBLpuId[warpId][1] = spaceBLpu % spaceBLpuCount2;
//---------------------------------------------------- A part dimensions
block_size_part_range(aSpaceBPRanges[warpId][0], aPSubRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
aSpaceBPRanges[warpId][1][0] = aPSubRanges[1][0];
aSpaceBPRanges[warpId][1][1] = aPSubRanges[1][1];
//---------------------------------------------------- B part dimensions
bSpaceBPRanges[warpId][0][0] = bPSubRanges[0][0];
bSpaceBPRanges[warpId][0][1] = bPSubRanges[0][1];
block_size_part_range(bSpaceBPRanges[warpId][1], bPSubRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
//---------------------------------------------------- C part dimensions
block_size_part_range(cSpaceBPRanges[warpId][0], cPRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
block_size_part_range(cSpaceBPRanges[warpId][1], cPRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
}
// there is no syncthread operation needed here as updates done by a thread in a
// warp are visible to all other threads in that warp
/*----------------------------------------------------------------------------------------------------
Translated Computation Stage
----------------------------------------------------------------------------------------------------*/
// the compute stage for IT matrix-matrix multiplication looks like the following
// do {
// c[i][j] = c[i][j] + a[i][k] * b[k][j]
// } for i, j in c; k in a
// In each warp we have 32 threads performing the same instruction in a lock-step
// fashion. If we can make the threads work on different pieces of data then we
// can have a vectorized translation of the IT for loop without any additional data
// synchronization among the threads. A simple static analysis of the code block
// should detect that i and j indices both appeared on the left hand side of the
// enclosed statement but not the k index. So we can let different threads work on
// different i or j values. In general, we should avoid varying both indices at the
// same time to reduce memory bank conflicts.
// But how do we select the index for distribution among threads that has the best
// potential for coalescing global memory and reducing shared memory accesses? The
// selection also needs to be cautious about compromising opportunities for parallelism.
// The initial solution for this is incorporated in the GPU utility library that, given
// a set of ranges to iterate, provides loop starting indexes and step sizes.
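// determineLoopIndexesAndSteps belongs to the GPU utility library (not shown in this
// file); judging from its use below, it presumably offsets one of the two iterable
// dimensions by threadId with a step of WARP_SIZE and leaves the other dimension to be
// swept sequentially, so the 32 threads of the warp cover disjoint elements of the C part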
int iterableRanges[4];
iterableRanges[0] = cSpaceBPRanges[warpId][0][0];
iterableRanges[1] = cSpaceBPRanges[warpId][0][1];
iterableRanges[2] = cSpaceBPRanges[warpId][1][0];
iterableRanges[3] = cSpaceBPRanges[warpId][1][1];
int indexesAndSteps[4];
determineLoopIndexesAndSteps(2, threadId, iterableRanges, indexesAndSteps);
// iterate over the rows
int iStart = indexesAndSteps[0];
int iEnd = iterableRanges[1];
int iStep = indexesAndSteps[1];
for (int i = iStart; i <= iEnd; i += iStep) {
int c_i = i - cSRanges[0][0];
int a_i = i - aSRanges[0][0];
// iterate over the columns
int jStart = indexesAndSteps[2];
int jEnd = iterableRanges[3];
int jStep = indexesAndSteps[3];
for (int j = jStart; j <= jEnd; j+= jStep) {
int c_j = j - cSRanges[1][0];
int b_j = j - bSRanges[1][0];
// iterate over the common dimension
int kStart = aSpaceBPRanges[warpId][1][0];
int kEnd = aSpaceBPRanges[warpId][1][1];
for (int k = kStart; k <= kEnd; k++) {
int a_k = k - aSRanges[1][0];
int b_k = k - bSRanges[0][0];
int cIndex = c_i * (cSRanges[1][1] - cSRanges[1][0] + 1) + c_j;
int aIndex = a_i * (aSRanges[1][1] - aSRanges[1][0] + 1) + a_k;
int bIndex = b_k * (bSRanges[1][1] - bSRanges[1][0] + 1) + b_j;
c[cIndex] += a[aIndex] * b[bIndex];
}
}
}
}
}
}
}
__global__ void matrixMultiplyKernelSharedMem(MMMLpuBatchRange batchRange,
mmm::Partition partition,
mmm::ArrayMetadata arrayMetadata,
mmm::TaskGlobals *taskGlobals,
mmm::ThreadLocals *threadLocals,
GpuBufferReferences aBuffers,
GpuBufferReferences bBuffers,
GpuBufferReferences cBuffers) {
// declaration of the shared dynamic memory panel that will be used to create SM-local data parts and an index
// that keeps track of the next free location in the memory panel
extern __shared__ char MEMORY_PANEL[];
__shared__ int FREE_MEMORY_INDEX;
FREE_MEMORY_INDEX = 0;
/*----------------------------------------------------------------------------------------------------------------------------
Space A: Top-most User Defined Space
----------------------------------------------------------------------------------------------------------------------------*/
// before we can do anything in the kernel, we need to determine the thread, warp, and sm IDs of the thread
// executing the kernel code
int smId = blockIdx.x;
int warpId = threadIdx.x / WARP_SIZE;
int threadId = threadIdx.x % WARP_SIZE;
// variables for holding the data part references for the top-space LPU
__shared__ double *a, *b, *c;
// variables for tracking storage and partition dimensions of the top space LPU's data parts
__shared__ int aSRanges[2][2], bSRanges[2][2], cSRanges[2][2];
__shared__ int aPRanges[2][2], bPRanges[2][2], cPRanges[2][2];
// SMs stride over different indexes to get different LPUs to operate on
Range lpuIdRange = batchRange.lpuIdRange;
for (int linearId = lpuIdRange.min + smId; linearId <= lpuIdRange.max; linearId += BLOCK_COUNT) {
// all threads should synchronize here to prevent the LPU metadata writer threads from overwriting the old
// values before the other threads are done using those values
__syncthreads();
// point the a, b, c matrix references to the memory addresses where corresponding data parts for the
// current LPUs starts
if (warpId == 0 && threadId == 0) {
//------------------------------------------------------------- retrieve a and its dimensions
__shared__ int lpuIndex, aIndex, aStartsAt, aDimRangeStart;
lpuIndex = linearId - lpuIdRange.min;
aIndex = aBuffers.partIndexBuffer[lpuIndex];
aStartsAt = aBuffers.partBeginningBuffer[aIndex];
a = (double *) (aBuffers.dataBuffer + aStartsAt);
aDimRangeStart = aIndex * 2 * 2 * 2;
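// each part stores 2 (dimensions) x 2 (storage + partition range) x 2 (min, max) = 8
// integers in partRangeBuffer: the four storage-range values first, then the four
// partition-range values, which the reads below unpack in that order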
aSRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart];
aSRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 1];
aSRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 2];
aSRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 3];
aPRanges[0][0] = aBuffers.partRangeBuffer[aDimRangeStart + 4];
aPRanges[0][1] = aBuffers.partRangeBuffer[aDimRangeStart + 5];
aPRanges[1][0] = aBuffers.partRangeBuffer[aDimRangeStart + 6];
aPRanges[1][1] = aBuffers.partRangeBuffer[aDimRangeStart + 7];
//------------------------------------------------------------- retrieve b and its dimensions
__shared__ int bIndex, bStartsAt, bDimRangeStart;
bIndex = bBuffers.partIndexBuffer[lpuIndex];
bStartsAt = bBuffers.partBeginningBuffer[bIndex];
b = (double *) (bBuffers.dataBuffer + bStartsAt);
bDimRangeStart = bIndex * 2 * 2 * 2;
bSRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart];
bSRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 1];
bSRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 2];
bSRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 3];
bPRanges[0][0] = bBuffers.partRangeBuffer[bDimRangeStart + 4];
bPRanges[0][1] = bBuffers.partRangeBuffer[bDimRangeStart + 5];
bPRanges[1][0] = bBuffers.partRangeBuffer[bDimRangeStart + 6];
bPRanges[1][1] = bBuffers.partRangeBuffer[bDimRangeStart + 7];
//------------------------------------------------------------- retrieve c and its dimensions
__shared__ int cIndex, cStartsAt, cDimRangeStart;
cIndex = cBuffers.partIndexBuffer[lpuIndex];
cStartsAt = cBuffers.partBeginningBuffer[cIndex];
c = (double *) (cBuffers.dataBuffer + cStartsAt);
cDimRangeStart = cIndex * 2 * 2 * 2;
cSRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart];
cSRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 1];
cSRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 2];
cSRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 3];
cPRanges[0][0] = cBuffers.partRangeBuffer[cDimRangeStart + 4];
cPRanges[0][1] = cBuffers.partRangeBuffer[cDimRangeStart + 5];
cPRanges[1][0] = cBuffers.partRangeBuffer[cDimRangeStart + 6];
cPRanges[1][1] = cBuffers.partRangeBuffer[cDimRangeStart + 7];
}
__syncthreads();
/*--------------------------------------------------------------------------------------------------------------------
Copying C into shared memory
--------------------------------------------------------------------------------------------------------------------*/
// allocate enough space for C part in the dynamic shared memory panel
__shared__ double *c_shared;
if (warpId == 0 && threadId == 0) {
c_shared = (double *) MEMORY_PANEL;
//c_shared = (double *) MEMORY_PANEL[FREE_MEMORY_INDEX];
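// NOTE: this kernel carves its tiles out of the dynamic shared memory panel at fixed
// byte offsets (c at 0, a at 32*32 doubles, b at 2*32*32 doubles, see below), which
// matches the 3 * blockSize * blockSize * sizeof(double) allocation made by the host
// only when blockSize is 32; FREE_MEMORY_INDEX is still advanced but the pointers are
// not derived from it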
int cSize = (cPRanges[0][1] - cPRanges[0][0] + 1)
* (cPRanges[1][1] - cPRanges[1][0] + 1) * sizeof(double);
FREE_MEMORY_INDEX += (cSize / MEMORY_PANNEL_ALIGNMENT_BOUNDARY)
* MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
if (cSize % MEMORY_PANNEL_ALIGNMENT_BOUNDARY != 0) {
FREE_MEMORY_INDEX += MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
}
}
__syncthreads();
// cooperatively load the current values of C from card memory part to the shared part
for (int i = cPRanges[0][0] + warpId; i <= cPRanges[0][1]; i += WARP_COUNT) {
for (int j = cPRanges[1][0] + threadId; j <= cPRanges[1][1]; j += WARP_SIZE) {
c_shared[(i - cPRanges[0][0])
* (cPRanges[1][1] - cPRanges[1][0] + 1)
+ (j - cPRanges[1][0])]
= c[(i - cSRanges[0][0])
* (cSRanges[1][1] - cSRanges[1][0] + 1)
+ (j - cSRanges[1][0])];
}
}
/*--------------------------------------------------------------------------------------------------------------------
Space A-Sub: Compiler Generated Space for Subpartition
--------------------------------------------------------------------------------------------------------------------*/
// once we have the storage and partition dimensions of data structure at the top-level space's LPU
// we can determine the sub-partition space's Lpu count
int subpartitionCount = block_size_part_count(bPRanges[0], partition.blockSize);
__shared__ int aPSubRanges[2][2], bPSubRanges[2][2];
// the subpartitioned LPUs are processed one by one; remember that LPUs of sub-partitioned LPSes are
// not supposed to be distributed
for (int subLpu = 0; subLpu < subpartitionCount; subLpu++) {
// the first warp should not advance to the next sub-partition when other warps have not yet
// finished their computation for the current sub-partition
__syncthreads();
if (warpId == 0 && threadId == 0) {
// first we need to determine the partition dimension ranges of the two sub-
// partitioned data structures, which are matrix A and B
__shared__ int lpuId;
lpuId = subLpu;
aPSubRanges[0][0] = aPRanges[0][0];
aPSubRanges[0][1] = aPRanges[0][1];
block_size_part_range(aPSubRanges[1], aPRanges[1],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
block_size_part_range(bPSubRanges[0], bPRanges[0],
subpartitionCount, lpuId, partition.blockSize, 0, 0);
bPSubRanges[1][0] = bPRanges[1][0];
bPSubRanges[1][1] = bPRanges[1][1];
}
__syncthreads();
/*------------------------------------------------------------------------------------------------------------
Copying A and B into Shared Memory
------------------------------------------------------------------------------------------------------------*/
// allocate enough space for the two shared variables in the dynamic shared memory panel
__shared__ double *a_shared, *b_shared;
if (warpId == 0 && threadId == 0) {
a_shared = (double *) (MEMORY_PANEL + 32 * 32 * sizeof(double));
//a_shared = (double *) MEMORY_PANEL[FREE_MEMORY_INDEX];
int aSize = (aPSubRanges[0][1] - aPSubRanges[0][0] + 1)
* (aPSubRanges[1][1] - aPSubRanges[1][0] + 1) * sizeof(double);
FREE_MEMORY_INDEX += (aSize / MEMORY_PANNEL_ALIGNMENT_BOUNDARY)
* MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
if (aSize % MEMORY_PANNEL_ALIGNMENT_BOUNDARY != 0) {
FREE_MEMORY_INDEX += MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
}
b_shared = (double *) (MEMORY_PANEL + 2 * 32 * 32 * sizeof(double));
//b_shared = (double *) MEMORY_PANEL[FREE_MEMORY_INDEX];
int bSize = (bPSubRanges[0][1] - bPSubRanges[0][0] + 1)
* (bPSubRanges[1][1] - bPSubRanges[1][0] + 1) * sizeof(double);
FREE_MEMORY_INDEX += (bSize / MEMORY_PANNEL_ALIGNMENT_BOUNDARY)
* MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
if (bSize % MEMORY_PANNEL_ALIGNMENT_BOUNDARY != 0) {
FREE_MEMORY_INDEX += MEMORY_PANNEL_ALIGNMENT_BOUNDARY;
}
}
__syncthreads();
// copy data from the global memory to the SM memory
for (int i = aPSubRanges[0][0] + warpId; i <= aPSubRanges[0][1]; i += WARP_COUNT) {
for (int j = aPSubRanges[1][0] + threadId;
j <= aPSubRanges[1][1]; j += WARP_SIZE) {
a_shared[(i - aPSubRanges[0][0])
* (aPSubRanges[1][1] - aPSubRanges[1][0] + 1)
+ (j - aPSubRanges[1][0])]
= a[(i - aSRanges[0][0])
* (aSRanges[1][1] - aSRanges[1][0] + 1)
+ (j - aSRanges[1][0])];
}
}
for (int i = bPSubRanges[0][0] + warpId; i <= bPSubRanges[0][1]; i += WARP_COUNT) {
for (int j = bPSubRanges[1][0] + threadId;
j <= bPSubRanges[1][1]; j += WARP_SIZE) {
b_shared[(i - bPSubRanges[0][0])
* (bPSubRanges[1][1] - bPSubRanges[1][0] + 1)
+ (j - bPSubRanges[1][0])]
= b[(i - bSRanges[0][0])
* (bSRanges[1][1] - bSRanges[1][0] + 1)
+ (j - bSRanges[1][0])];
}
}
__syncthreads();
/*------------------------------------------------------------------------------------------------------------
// here we should load sub-section of A and B from the GPU card memory to the local memory
// what about C? Or should we directly perform all computation on the card memory and rely
// on the hardware's caching mechanism to do the global and shared memory interactions?
// In the multicore and segmented memory architecture cases the matrix-matrix multiplication
// code starts here. In the GPU, the existing partition scheme will result in only one warp
// within an SM doing computation for the user code. Rather, the user should have the
// computation distributed to multiple warps for different smaller sub-sections of the
// block of matrix C using another lower level LPS
--------------------------------------------------------------------------------------------------------------
Space B: Lowest User Defined Space
------------------------------------------------------------------------------------------------------------*/
// Space B LPUs will be distributed among the warps; so the parts' dimension configuration
// should be different for different warps and we cannot have a single shared object per
// part information as we have done in the previous LPSes. Rather, we will have a shared
// memory panel having one entry per warp to hold the relevant part's dimension configuration.
__shared__ int aSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int bSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int cSpaceBPRanges[WARP_COUNT][2][2];
__shared__ int spaceBLpuCount1, spaceBLpuCount2, spaceBLpuCount;
if (warpId == 0 && threadId == 0) {
spaceBLpuCount1 = block_size_part_count(cPRanges[0], 1);
spaceBLpuCount2 = block_size_part_count(cPRanges[1], partition.blockSize);
spaceBLpuCount = spaceBLpuCount1 * spaceBLpuCount2;
}
__syncthreads();
// distribute the Space B LPUs among the warps
__shared__ int spaceBLpuId[WARP_COUNT][2];
for (int spaceBLpu = warpId; spaceBLpu < spaceBLpuCount; spaceBLpu += WARP_COUNT) {
if (threadId == 0) {
// construct the 2 dimensional LPU ID from the linear LPU Id
spaceBLpuId[warpId][0] = spaceBLpu / spaceBLpuCount2;
spaceBLpuId[warpId][1] = spaceBLpu % spaceBLpuCount2;
//---------------------------------------------------- A part dimensions
block_size_part_range(aSpaceBPRanges[warpId][0], aPSubRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
aSpaceBPRanges[warpId][1][0] = aPSubRanges[1][0];
aSpaceBPRanges[warpId][1][1] = aPSubRanges[1][1];
//---------------------------------------------------- B part dimensions
bSpaceBPRanges[warpId][0][0] = bPSubRanges[0][0];
bSpaceBPRanges[warpId][0][1] = bPSubRanges[0][1];
block_size_part_range(bSpaceBPRanges[warpId][1], bPSubRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
//---------------------------------------------------- C part dimensions
block_size_part_range(cSpaceBPRanges[warpId][0], cPRanges[0],
spaceBLpuCount1, spaceBLpuId[warpId][0], 1, 0, 0);
block_size_part_range(cSpaceBPRanges[warpId][1], cPRanges[1],
spaceBLpuCount2, spaceBLpuId[warpId][1],
partition.blockSize, 0, 0);
}
// there is no syncthread operation needed here as updates done by a thread in a
// warp are visible to all other threads in that warp
/*----------------------------------------------------------------------------------------------------
Translated Computation Stage
----------------------------------------------------------------------------------------------------*/
// the compute stage for IT matrix-matrix multiplication looks like the following
// do {
// c[i][j] = c[i][j] + a[i][k] * b[k][j]
// } for i, j in c; k in a
// In each warp we have 32 threads performing the same instruction in a lock-step
// fasion. If we can make the threads working on different piece of data then we
// can have a vectorized translation of the IT for loop without any additional data
// synchronization among the threads. A simple static analysis of the code block
// should detect that i and j indices both appeared on the left hand side of the
// enclosed statement but not the k index. So we can let different threads work on
// different i or j values. In general, we should avoid varying both indices at the
// same time to reduce memory bank conflicts.
// But how do we select the index for distribution among threads that has the best
// potential for coalescing global memory and reducing shared memory accesses? The
// selection also needs to be cautious about compromising opportunities for parallelism.
// The initial solution for this is incorporated in the GPU utility library that, given
// a set of ranges to iterate, provides loop starting indexes and step sizes.
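// determineLoopIndexesAndSteps belongs to the GPU utility library (not shown in this
// file); judging from its use below, it presumably offsets one of the two iterable
// dimensions by threadId with a step of WARP_SIZE and leaves the other dimension to be
// swept sequentially, so the 32 threads of the warp cover disjoint elements of the C part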
int iterableRanges[4];
iterableRanges[0] = cSpaceBPRanges[warpId][0][0];
iterableRanges[1] = cSpaceBPRanges[warpId][0][1];
iterableRanges[2] = cSpaceBPRanges[warpId][1][0];
iterableRanges[3] = cSpaceBPRanges[warpId][1][1];
int indexesAndSteps[4];
determineLoopIndexesAndSteps(2, threadId, iterableRanges, indexesAndSteps);
// iterate over the rows
int iStart = indexesAndSteps[0];
int iEnd = iterableRanges[1];
int iStep = indexesAndSteps[1];
for (int i = iStart; i <= iEnd; i += iStep) {
int c_i = i - cPRanges[0][0];
int a_i = i - aPSubRanges[0][0];
// iterate over the columns
int jStart = indexesAndSteps[2];
int jEnd = iterableRanges[3];
int jStep = indexesAndSteps[3];
for (int j = jStart; j <= jEnd; j+= jStep) {
int c_j = j - cPRanges[1][0];
int b_j = j - bPSubRanges[1][0];
// iterate over the common dimension
int kStart = aSpaceBPRanges[warpId][1][0];
int kEnd = aSpaceBPRanges[warpId][1][1];
for (int k = kStart; k <= kEnd; k++) {
int a_k = k - aPSubRanges[1][0];
int b_k = k - bPSubRanges[0][0];
int cIndex = c_i * (cPRanges[1][1] - cPRanges[1][0] + 1) + c_j;
int aIndex = a_i * (aPSubRanges[1][1] - aPSubRanges[1][0] + 1) + a_k;
int bIndex = b_k * (bPSubRanges[1][1] - bPSubRanges[1][0] + 1) + b_j;
c_shared[cIndex] += a_shared[aIndex] * b_shared[bIndex];
} // k loop ends
} // j loop ends
} // i loop ends
} // Space B LPUs traversal ends
} // Space A sub-partition LPUs traversal ends
/*--------------------------------------------------------------------------------------------------------------------
Copying C Back to the Card Memory
--------------------------------------------------------------------------------------------------------------------*/
__syncthreads();
for (int i = cPRanges[0][0] + warpId; i <= cPRanges[0][1]; i += WARP_COUNT) {
for (int j = cPRanges[1][0] + threadId; j <= cPRanges[1][1]; j += WARP_SIZE) {
c[(i - cSRanges[0][0])
* (cSRanges[1][1] - cSRanges[1][0] + 1)
+ (j - cSRanges[1][0])]
= c_shared[(i - cPRanges[0][0])
* (cPRanges[1][1] - cPRanges[1][0] + 1)
+ (j - cPRanges[1][0])];
}
}
} // Space A LPUs traversal ends
}
//------------------------------------------------------- MMM GPU Code Executor ------------------------------------------------------/
MMMGpuCodeExecutor::MMMGpuCodeExecutor(LpuBatchController *lpuBatchController,
mmm::Partition partition,
mmm::ArrayMetadata arrayMetadata,
mmm::TaskGlobals *taskGlobals,
mmm::ThreadLocals *threadLocals)
: GpuCodeExecutor(lpuBatchController) {
this->partition = partition;
this->arrayMetadata = arrayMetadata;
this->taskGlobalsCpu = taskGlobals;
this->taskGlobalsGpu = NULL;
this->threadLocalsCpu = threadLocals;
this->threadLocalsGpu = NULL;
}
void MMMGpuCodeExecutor::offloadFunction() {
GpuBufferReferences *aBuffers = lpuBatchController->getGpuBufferReferences("a");
GpuBufferReferences *bBuffers = lpuBatchController->getGpuBufferReferences("b");
GpuBufferReferences *cBuffers = lpuBatchController->getGpuBufferReferences("c");
MMMLpuBatchRange batchRange;
batchRange.lpuIdRange = currentBatchLpuRange;
batchRange.lpuCount1 = lpuCount[0];
batchRange.lpuCount2 = lpuCount[1];
int threadsPerBlock = WARP_SIZE * WARP_COUNT;
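// reserve dynamic shared memory for the three tiles (a, b and c) of
// blockSize x blockSize doubles that the kernel stages per thread block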
int shared_memory_size = 3 * partition.blockSize * partition.blockSize * sizeof(double);
matrixMultiplyKernelSharedMem <<< BLOCK_COUNT, threadsPerBlock, shared_memory_size >>>
(batchRange, partition, arrayMetadata,
taskGlobalsGpu, threadLocalsGpu,
*aBuffers, *bBuffers, *cBuffers);
delete aBuffers;
delete bBuffers;
delete cBuffers;
}
void MMMGpuCodeExecutor::initialize() {
GpuCodeExecutor::initialize();
// sizes must be taken from the pointed-to structures, not from the pointer variables
size_t taskGlobalsSize = sizeof(mmm::TaskGlobals);
cudaMalloc((void **) &taskGlobalsGpu, taskGlobalsSize);
cudaMemcpy(taskGlobalsGpu, taskGlobalsCpu, taskGlobalsSize, cudaMemcpyHostToDevice);
size_t threadLocalsSize = sizeof(mmm::ThreadLocals);
cudaMalloc((void **) &threadLocalsGpu, threadLocalsSize);
cudaMemcpy(threadLocalsGpu, threadLocalsCpu, threadLocalsSize, cudaMemcpyHostToDevice);
}
void MMMGpuCodeExecutor::cleanup() {
// again use the structure sizes so the full objects are copied back
size_t taskGlobalsSize = sizeof(mmm::TaskGlobals);
cudaMemcpy(taskGlobalsCpu, taskGlobalsGpu, taskGlobalsSize, cudaMemcpyDeviceToHost);
size_t threadLocalsSize = sizeof(mmm::ThreadLocals);
cudaMemcpy(threadLocalsCpu, threadLocalsGpu, threadLocalsSize, cudaMemcpyDeviceToHost);
GpuCodeExecutor::cleanup();
}
|
df09d3d47eda071f308b705ba7651061ce9050b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "common_cuda.h"
#include "equilibrium_solver/minimization_result_info_cuda.h"
namespace equilibrium_solver {
// make variables on the device visible
#ifdef __CUDA_ARCH__
using namespace common_device;
#else
using namespace common;
#endif
constexpr size_t minimization_info_update_batch_size = 128;
__device__ unsigned int min_info_update_counter;
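// min_info_update_counter hands out the next free slot of the active_indices array; it
// is presumably reset to zero from the host (e.g. via hipMemcpyToSymbol) before each
// launch, since the kernel never clears it itself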
__global__
void minimization_info_update_kernel(MinimizationResultInfoCuda& info, MinizationsOptions& options,
numeric_t* device_Js_ptr,
numeric_t* device_Fs_ptr,
numeric_t* device_Js_ptrs,
numeric_t* device_Fs_ptrs) {
constexpr size_t bs = minimization_info_update_batch_size;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < (info.n+bs)/bs) {
// determine problems that have converged and update active_indices array
// each thread scans one batch of bs consecutive problems
for(size_t i=idx*bs; i<(idx+1)*bs && i<info.n; ++i) {
if (error[i]<options.tol) {
converged[i] = true;
} else {
// claim the next free slot of active_indices; atomicInc takes the counter's
// address and returns its previous value
unsigned int k = atomicInc(&min_info_update_counter, 4294967295);
active_indices[k] = i;
}
}
// the counter holds the running number of still-active problems; this per-thread
// read is only a snapshot of that value
n_active = min_info_update_counter;
}
}
} | df09d3d47eda071f308b705ba7651061ce9050b6.cu | #include "common.h"
#include "common_cuda.h"
#include "equilibrium_solver/minimization_result_info_cuda.h"
namespace equilibrium_solver {
// make variables on the device visible
#ifdef __CUDA_ARCH__
using namespace common_device;
#else
using namespace common;
#endif
constexpr size_t minimization_info_update_batch_size = 128;
__device__ unsigned int min_info_update_counter;
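// min_info_update_counter hands out the next free slot of the active_indices array; it
// is presumably reset to zero from the host (e.g. via cudaMemcpyToSymbol) before each
// launch, since the kernel never clears it itself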
__global__
void minimization_info_update_kernel(MinimizationResultInfoCuda& info, MinizationsOptions& options,
numeric_t* device_Js_ptr,
numeric_t* device_Fs_ptr,
numeric_t* device_Js_ptrs,
numeric_t* device_Fs_ptrs) {
constexpr size_t bs = minimization_info_update_batch_size;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < (info.n+bs)/bs) {
// determine problems that have converged and update active_indices array
// each thread scans one batch of bs consecutive problems
for(size_t i=idx*bs; i<(idx+1)*bs && i<info.n; ++i) {
if (error[i]<options.tol) {
converged[i] = true;
} else {
// claim the next free slot of active_indices; atomicInc takes the counter's
// address and returns its previous value
unsigned int k = atomicInc(&min_info_update_counter, 4294967295);
active_indices[k] = i;
}
}
// the counter holds the running number of still-active problems; this per-thread
// read is only a snapshot of that value
n_active = min_info_update_counter;
}
}
} |
773ab1871b6d573b59a04211ceaf3acff6b636fd.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
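//The graph is stored in a CSR-like layout: h_graph_edges concatenates all adjacency
//lists, and node i owns the slice of length no_of_edges starting at index 'starting';
//edge costs are read from the input file but not used by this traversal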
#include "kernel.hip"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initialize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
// allocate mem for the result on host side
int8_t* h_cost = (int8_t*) malloc( sizeof(int8_t)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int8_t* d_cost;
hipMalloc( (void**) &d_cost, sizeof(int8_t)*no_of_nodes);
hipMemcpy( d_cost, h_cost, sizeof(int8_t)*no_of_nodes, hipMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
hipMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the Kernel until all the elements of the Frontier are false
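//Each pass expands the search by one level: Kernel (kernel.hip) visits nodes whose mask
//bit is set and marks unvisited neighbours in the updating mask, then Kernel2 promotes
//the updating mask into the frontier mask and raises d_over when anything new was added,
//so the loop ends once the frontier is empty (behaviour inferred from the arguments here)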
do
{
//if no thread changes this value then the loop stops
stop=false;
hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ;
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated an error
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated an error
hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
hipMemcpy( h_cost, d_cost, sizeof(int8_t)*no_of_nodes, hipMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
}
| 773ab1871b6d573b59a04211ceaf3acff6b636fd.cu | /***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
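//The graph is stored in a CSR-like layout: h_graph_edges concatenates all adjacency
//lists, and node i owns the slice of length no_of_edges starting at index 'starting';
//edge costs are read from the input file but not used by this traversal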
#include "kernel.cu"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initialize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
// allocate mem for the result on host side
int8_t* h_cost = (int8_t*) malloc( sizeof(int8_t)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int8_t* d_cost;
cudaMalloc( (void**) &d_cost, sizeof(int8_t)*no_of_nodes);
cudaMemcpy( d_cost, h_cost, sizeof(int8_t)*no_of_nodes, cudaMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
cudaMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the Kernel until all the elements of the Frontier are false
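//Each pass expands the search by one level: Kernel (kernel.cu) visits nodes whose mask
//bit is set and marks unvisited neighbours in the updating mask, then Kernel2 promotes
//the updating mask into the frontier mask and raises d_over when anything new was added,
//so the loop ends once the frontier is empty (behaviour inferred from the arguments here)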
do
{
//if no thread changes this value then the loop stops
stop=false;
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ;
Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated an error
Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated an error
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
cudaMemcpy( h_cost, d_cost, sizeof(int8_t)*no_of_nodes, cudaMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
}
|
77142bbc30bd250cd157a010e68b2f2b13aae1e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
/* ============================================================ */
/* Stream == 1 */
/* ============================================================ */
/* Flops == 1 */
__global__ void intensity_kernel_1_1(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 2 */
__global__ void intensity_kernel_1_2(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 3 */
__global__ void intensity_kernel_1_3(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 4 */
__global__ void intensity_kernel_1_4(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 5 */
__global__ void intensity_kernel_1_5(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 6 */
__global__ void intensity_kernel_1_6(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 7 */
__global__ void intensity_kernel_1_7(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 8 */
__global__ void intensity_kernel_1_8(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 9 */
__global__ void intensity_kernel_1_9(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 10 */
__global__ void intensity_kernel_1_10(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 12 */
__global__ void intensity_kernel_1_12(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 14 */
__global__ void intensity_kernel_1_14(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 16 */
__global__ void intensity_kernel_1_16(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 20 */
__global__ void intensity_kernel_1_20(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 24 */
__global__ void intensity_kernel_1_24(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 28 */
__global__ void intensity_kernel_1_28(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 32 */
__global__ void intensity_kernel_1_32(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 40 */
__global__ void intensity_kernel_1_40(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 48 */
__global__ void intensity_kernel_1_48(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 56 */
__global__ void intensity_kernel_1_56(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 64 */
__global__ void intensity_kernel_1_64(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 72 */
__global__ void intensity_kernel_1_72(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 128 */
__global__ void intensity_kernel_1_128(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 160 */
__global__ void intensity_kernel_1_160(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 256: the kernel body applies 256 dependent v = v + v * CONST updates to each of the four float4 components */
__global__ void intensity_kernel_1_256(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
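/*
 * Minimal usage sketch (an illustrative assumption, not part of the generated
 * benchmark): launches intensity_kernel_1_256 over num_threads elements.
 * It assumes TYPE is float4 and that d_in/d_out are device buffers holding at
 * least num_threads elements; the block size of 256 is an arbitrary choice.
 */
static void launch_intensity_256_example(int num_threads, TYPE* d_in, TYPE* d_out)
{
    const int block = 256;                               /* threads per block (illustrative) */
    const int grid  = (num_threads + block - 1) / block; /* enough blocks to cover all elements */
    intensity_kernel_1_256<<<grid, block>>>(num_threads, /*num_streams=*/1, d_in, d_out);
}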
/* Flops == 512: same pattern as above, with 512 dependent v = v + v * CONST updates per float4 component */
__global__ void intensity_kernel_1_512(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
/* Write the transformed components back so the unrolled arithmetic above is not optimized away. */
out[tid] = make_float4 (x, y, z, w);
}
}
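/*
 * Hypothetical host-side sketch (not part of the generated benchmark code): it shows one
 * way a kernel from this intensity family could be launched over num_threads elements of
 * type TYPE. The block size, helper name, and forward declaration are illustrative
 * assumptions; timing and synchronization are left to the caller.
 */
__global__ void intensity_kernel_1_1024(int num_threads, int num_streams, TYPE* in, TYPE* out);

static void launch_intensity_1_1024_example(int num_threads, TYPE* d_in, TYPE* d_out)
{
/* One thread per element; round the grid up so all num_threads elements are covered. */
const int block_size = 256;
const int grid_size = (num_threads + block_size - 1) / block_size;
intensity_kernel_1_1024<<<grid_size, block_size>>>(num_threads, /* num_streams = */ 1, d_in, d_out);
}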
/* Flops == 1024 */
/* Arithmetic-intensity microbenchmark kernel: each thread loads one element of `in`,
 * runs a long chain of dependent multiply-add updates on its four components
 * (fully unrolled below, totalling the 1024 flops noted above), and writes the
 * result to `out`. The num_streams argument appears unused inside the kernel body. */
__global__ void intensity_kernel_1_1024(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
/* Unpack the four components; the unrolled update chain follows. */
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4(x, y, z, w);
}
}
/* Flops == 2048 */
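/*
 * Each statement of the form `v = v + v * CONST` in the kernel body is one
 * multiply-add per float4 lane, so a group of four statements (x, y, z, w)
 * performs 8 floating-point operations. The body is fully unrolled so the
 * per-thread flop count is fixed at compile time and not subject to loop
 * overhead or the compiler's unrolling decisions. The kernel-name suffix
 * (here `_1_2048`) appears to encode the number of float4 memory accesses
 * and the flop count that set the kernel's arithmetic intensity; that
 * reading is an assumption based on the `Flops == ...` comments and is not
 * stated elsewhere in this file.
 */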
__global__ void intensity_kernel_1_2048(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;

// The loop below is arithmetically identical to the 842 manually unrolled
// repetitions of the same four dependent multiply-add updates that it
// replaces; #pragma unroll asks the compiler to keep the sequence fully
// unrolled, preserving the original instruction stream.
#pragma unroll
for (int unrollIter = 0; unrollIter < 842; ++unrollIter)
{
    x = x + x * CONST;
    y = y + y * CONST;
    z = z + z * CONST;
    w = w + w * CONST;
}
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
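/*
 * The intensity_kernel_* family below is machine-generated benchmark code:
 * each thread loads a single TYPE element (a float4, judging by the
 * make_float4() store and the .x/.y/.z/.w accesses), runs a long unrolled
 * chain of register-only multiply-add updates, and writes the result back.
 * The trailing number in each kernel name matches the "Flops == N" comment
 * above it, so the kernels appear to sweep arithmetic intensity while this
 * kernel's memory traffic stays at one load and one store per thread.
 *
 * A minimal, illustrative host-side launch sketch (hypothetical names;
 * buffer allocation and the TYPE/CONST definitions live elsewhere in this
 * file, and num_streams is passed but unused by the kernel body):
 *
 *   int threads = 1 << 20;                              // total work items
 *   dim3 block(256);
 *   dim3 grid((threads + block.x - 1) / block.x);
 *   hipLaunchKernelGGL(intensity_kernel_1_4096, grid, block, 0, 0,
 *                      threads, 1, d_in, d_out);        // d_in/d_out: device TYPE buffers
 *   hipDeviceSynchronize();
 */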
/* Flops == 4096 */
__global__ void intensity_kernel_1_4096(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
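// Unrolled register-only multiply-add chain: every statement below updates
// one component in place (one multiply plus one add), with no memory
// accesses between the single load above and the final store.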
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;

// 842 further repetitions of the same four dependent updates on x, y, z and w.
// The loop has a fixed trip count and is fully unrolled, so the emitted
// instruction stream matches a hand-unrolled sequence of these statements.
#pragma unroll
for (int rep = 0; rep < 842; ++rep)
{
    x = x + x * CONST;
    y = y + y * CONST;
    z = z + z * CONST;
    w = w + w * CONST;
}
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
    // The original source repeats this four-variable update verbatim thousands
    // of times to produce a long, straight-line kernel body. The 3371 repeated
    // statements in this span (842 full w/x/y/z groups plus a trailing w, x, y)
    // are collapsed into an equivalent loop; #pragma unroll asks the compiler to
    // regenerate the fully unrolled form, so the arithmetic performed and the
    // number of statements are unchanged.
    #pragma unroll
    for (int rep = 0; rep < 842; ++rep)
    {
        w = w + w * CONST;
        x = x + x * CONST;
        y = y + y * CONST;
        z = z + z * CONST;
    }
    w = w + w * CONST;
    x = x + x * CONST;
    y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* ============================================================ */
77142bbc30bd250cd157a010e68b2f2b13aae1e2.cu
#include "common.h"
/* ============================================================ */
/* Stream == 1 */
/* ============================================================ */
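/* The kernels below share one pattern: each intensity_kernel_1_N loads a
 * single float4 per thread, applies N rounds of "v = v + v * CONST" to each
 * of the four components, and stores the result, so N sets the flop count
 * per element while the memory traffic stays at one load and one store per
 * thread. The "1" in the name matches the "Stream == 1" banner above;
 * num_streams appears in the signature but is not used in the kernel bodies
 * shown here. */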
/* Flops == 1 */
__global__ void intensity_kernel_1_1(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
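
/* A minimal host-side launch sketch for the kernel above. This is an
 * illustration only: the real driver is not part of this file (it is assumed
 * to live alongside common.h), the block size of 256 is an assumption, and
 * TYPE is taken to be float4 to match the make_float4() store in the kernel. */
static void launch_intensity_1_1_example(int num_threads, TYPE *d_in, TYPE *d_out)
{
    const int block = 256;                               /* assumed threads per block */
    const int grid  = (num_threads + block - 1) / block; /* enough blocks to cover num_threads */
    /* num_streams is passed as 1 to mirror the "Stream == 1" variant. */
    intensity_kernel_1_1<<<grid, block>>>(num_threads, 1, d_in, d_out);
    cudaDeviceSynchronize();                             /* block until the kernel finishes */
}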
/* Flops == 2 */
__global__ void intensity_kernel_1_2(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 3 */
__global__ void intensity_kernel_1_3(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 4 */
__global__ void intensity_kernel_1_4(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 5 */
__global__ void intensity_kernel_1_5(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 6 */
__global__ void intensity_kernel_1_6(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 7 */
__global__ void intensity_kernel_1_7(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 8 */
__global__ void intensity_kernel_1_8(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 9 */
__global__ void intensity_kernel_1_9(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 10 */
__global__ void intensity_kernel_1_10(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 12 */
__global__ void intensity_kernel_1_12(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 14 */
__global__ void intensity_kernel_1_14(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 16 */
__global__ void intensity_kernel_1_16(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 20 */
__global__ void intensity_kernel_1_20(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 24 */
__global__ void intensity_kernel_1_24(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 28 */
__global__ void intensity_kernel_1_28(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 32 */
__global__ void intensity_kernel_1_32(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 40 */
__global__ void intensity_kernel_1_40(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 48 */
__global__ void intensity_kernel_1_48(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
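/*
 * Hypothetical host-side launch sketch (not in the original file): how one of
 * these variants might be driven. The 256-thread block size, the default
 * stream, and passing 1 for num_streams (which the kernels never read) are
 * illustrative assumptions; d_in and d_out are device buffers allocated by
 * the caller. Kept under #if 0 so it does not affect the build.
 */
#if 0
static void launch_intensity_1_48_example(int num_threads, TYPE* d_in, TYPE* d_out)
{
    const int block = 256;
    const int grid  = (num_threads + block - 1) / block;  /* round up so every element is covered */
    intensity_kernel_1_48<<<grid, block>>>(num_threads, 1, d_in, d_out);
}
#endif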
/* Flops == 56 */
__global__ void intensity_kernel_1_56(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 64 */
__global__ void intensity_kernel_1_64(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 72 */
__global__ void intensity_kernel_1_72(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 128 */
__global__ void intensity_kernel_1_128(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
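/*
 * Hypothetical timing sketch (not in the original file): measuring the
 * 128-step variant with events and converting to GFLOP/s using a
 * 2 flops x 4 components x 128 steps per-thread model. It assumes the CUDA
 * runtime API (a hipified build would use the hip* equivalents) and device
 * buffers provided by the caller. Kept under #if 0 so it does not affect the
 * build.
 */
#if 0
static double time_intensity_1_128_example(int num_threads, TYPE* d_in, TYPE* d_out)
{
    const int block = 256;
    const int grid  = (num_threads + block - 1) / block;
    cudaEvent_t start, stop;
    float ms = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    intensity_kernel_1_128<<<grid, block>>>(num_threads, 1, d_in, d_out);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);                 /* elapsed time in milliseconds */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    double flops = 2.0 * 4.0 * 128.0 * (double)num_threads; /* each FMA counts as 2 flops */
    return flops / (ms * 1.0e6);                            /* GFLOP/s */
}
#endif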
/* Flops == 160 */
__global__ void intensity_kernel_1_160(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
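/*
 * Nominal arithmetic intensity of this family (an interpretation, not a
 * statement from the original source): each thread moves 2 * sizeof(float4)
 * = 32 bytes and performs N FMA steps on 4 components, i.e. 8*N flops, so the
 * intensity is roughly 8*N / 32 = N/4 flops per byte (N = 160 above gives
 * 40 flops/byte, N = 256 below gives 64 flops/byte).
 */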
/* Flops == 256 */
__global__ void intensity_kernel_1_256(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
    uint tid = threadIdx.x + blockIdx.x * blockDim.x;
    TYPE tmp1;
    float x, y, z, w;
    if(tid < num_threads) {
        tmp1 = in[tid];
        x = tmp1.x;
        y = tmp1.y;
        z = tmp1.z;
        w = tmp1.w;
        /* 256 dependent update groups on x, y, z and w (one FMA per statement);
           the constant trip count lets the compiler fully unroll this into a
           straight-line FMA stream. */
        #pragma unroll
        for (int i = 0; i < 256; ++i) {
            x = x + x * CONST;
            y = y + y * CONST;
            z = z + z * CONST;
            w = w + w * CONST;
        }
        out[tid] = make_float4 (x, y, z, w);
    }
}
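/*
 * Minimal host-side launch sketch (illustrative only, not part of the generated
 * kernels in this file; the helper name and the choice of block size are
 * hypothetical, and TYPE is assumed to name the element type of the device
 * buffers, as in the kernel signatures above):
 *
 *   static void run_intensity_1_256(int num_threads, TYPE* d_in, TYPE* d_out)
 *   {
 *       const int block = 256;                               // threads per block
 *       const int grid  = (num_threads + block - 1) / block; // cover all threads
 *       // num_streams is passed as 1; the kernel bodies shown here do not read it.
 *       intensity_kernel_1_256<<<grid, block>>>(num_threads, 1, d_in, d_out);
 *   }
 */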
/* Flops == 512 */
__global__ void intensity_kernel_1_512(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
// Write the result back so the compiler cannot eliminate the arithmetic.
out[tid] = make_float4(x, y, z, w);
}
}
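/*
 * Usage sketch (illustrative only, not part of the generated benchmark): the
 * intensity kernels in this file are presumably launched over a flat array of
 * TYPE (float4) elements and timed to estimate an achieved FLOP rate. The
 * helper below shows one plausible way to do that for intensity_kernel_1_1024,
 * which is defined directly below. The function name run_intensity_1_1024, the
 * 256-thread block size, the num_streams value of 1, and the use of
 * cudaDeviceSynchronize() are assumptions made for this example.
 *
 *   void run_intensity_1_1024(int num_threads, TYPE* d_in, TYPE* d_out)
 *   {
 *       dim3 block(256);
 *       dim3 grid((num_threads + block.x - 1) / block.x);
 *       intensity_kernel_1_1024<<<grid, block>>>(num_threads, 1, d_in, d_out);
 *       cudaDeviceSynchronize();
 *       // Each thread that passes the tid < num_threads guard performs the
 *       // operation count given in the kernel's Flops comment, so the total
 *       // work is roughly num_threads * that count; dividing by the measured
 *       // kernel time gives the achieved rate.
 *   }
 */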
/* Flops == 1024 */
__global__ void intensity_kernel_1_1024(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
// Global thread index: one element of the input array per thread.
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if (tid < num_threads) {
// Load one vector element and unpack its four components into registers.
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
// Fully unrolled sequence of dependent multiply-adds on each component; the
// explicit repetition fixes the kernel's arithmetic intensity without adding
// loop overhead.
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* Flops == 2048 */
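/*
 * Note added for clarity (inferred from the generated code, not produced by the
 * original generator): every statement in the kernel body below is a dependent
 * multiply-add on one float4 component, and the x/y/z/w group is repeated enough
 * times for each thread to perform the advertised flop count on the value it
 * loads from `in` and finally writes back to `out`. The `num_streams` parameter
 * is unused in the kernel body and presumably just mirrors the signature shared
 * by the other intensity kernels in this file.
 */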
__global__ void intensity_kernel_1_2048(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
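// Load one float4 element and unpack its components into registers.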
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
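// Generated sequence of dependent multiply-adds, kept fully unrolled
// (presumably so the per-thread flop count does not depend on compiler
// loop unrolling).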
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
// Round-robin dependent multiply-add updates of x, y, z and w; the loop
// applies the same number of updates, in the same order, as the repeated
// statements it condenses.
for (int rep = 0; rep < 842; ++rep)
{
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
}
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
/* 148 repetitions of the same four dependent FMA statements, written as a
fixed-trip-count loop; #pragma unroll fully unrolls it again, so the
dependent FMA chain and the kernel's flop count are unchanged. */
#pragma unroll
for (int i = 0; i < 148; i++) {
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
}
out[tid] = make_float4 (x, y, z, w);
}
}
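/*
* Note: every statement of the form `v = v + v * CONST` in these intensity
* kernels is one fused multiply-add that rescales the component by (1 + CONST).
* After N such statements a component therefore holds v0 * (1 + CONST)^N, so the
* kernels in this family differ only in how many dependent FMAs (flops) each
* thread performs between one float4 load and one float4 store. The helper below
* is a minimal host-side sketch of that closed form, useful for sanity-checking
* values written to `out`; its name and the use of double are illustrative
* assumptions, not part of the generated benchmark.
*/
static inline double intensity_reference_value(double v0, double c, int n_fma)
{
// Apply v <- v + v * c (i.e. v <- v * (1 + c)) n_fma times.
double v = v0;
for (int k = 0; k < n_fma; ++k)
{
v = v + v * c;
}
return v; // equals v0 * pow(1.0 + c, n_fma) up to rounding
}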
/* Flops == 4096 : each thread does one float4 load and one float4 store, with a
chain of dependent multiply-add statements in between totalling 4096 flops. */
__global__ void intensity_kernel_1_4096(int num_threads, int num_streams, TYPE* in, TYPE* out)
{
uint tid = threadIdx.x + blockIdx.x * blockDim.x;
TYPE tmp1;
float x, y, z, w;
if(tid < num_threads) {
tmp1 = in[tid];
x = tmp1.x;
y = tmp1.y;
z = tmp1.z;
w = tmp1.w;
/* 689 repetitions of the four dependent FMA statements, written as a
fixed-trip-count loop; #pragma unroll fully unrolls it, so the dependent FMA
chain and the kernel's total flop count are unchanged. The remaining
repetitions of the generated body follow below. */
#pragma unroll
for (int i = 0; i < 689; i++) {
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
}
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
// The original source wrote out the next 842 repetitions of this
// four-statement update by hand; the loop below performs exactly the
// same updates in the same order without the manual unrolling.
for (int rep = 0; rep < 842; ++rep)
{
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
}
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
x = x + x * CONST;
y = y + y * CONST;
z = z + z * CONST;
w = w + w * CONST;
out[tid] = make_float4 (x, y, z, w);
}
}
/* ============================================================ */
|
e9e87fc90dd8bd0080baacc97846f79878b37047.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
//=====================================================================
// MAIN FUNCTION
//=====================================================================
void master(fp timeinst,
fp* initvalu,
fp* parameter,
fp* finavalu,
fp* com,
fp* d_initvalu,
fp* d_finavalu,
fp* d_params,
fp* d_com){
//=====================================================================
// VARIABLES
//=====================================================================
// counters
int i;
// offset pointers
int initvalu_offset_ecc; // 46 points
int initvalu_offset_Dyad; // 15 points
int initvalu_offset_SL; // 15 points
int initvalu_offset_Cyt; // 15 points
// cuda
dim3 threads;
dim3 blocks;
//=====================================================================
// execute ECC&CAM kernel - it runs ECC and CAMs in parallel
//=====================================================================
int d_initvalu_mem;
d_initvalu_mem = EQUATIONS * sizeof(fp);
int d_finavalu_mem;
d_finavalu_mem = EQUATIONS * sizeof(fp);
int d_params_mem;
d_params_mem = PARAMETERS * sizeof(fp);
int d_com_mem;
d_com_mem = 3 * sizeof(fp);
hipMemcpy(d_initvalu, initvalu, d_initvalu_mem, hipMemcpyHostToDevice);
hipMemcpy(d_params, parameter, d_params_mem, hipMemcpyHostToDevice);
threads.x = NUMBER_THREADS;
threads.y = 1;
blocks.x = 2;
blocks.y = 1;
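// a hedged reading of this launch config: with blocks.x == 2, one block presumably
// evaluates the ECC model and the other the CAM models (hence the "in parallel" comment
// above); the exact block-to-model mapping lives inside kernel() and is assumed here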
allocateReadWriteSets(blocks, threads);
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, timeinst,
d_initvalu,
d_finavalu,
d_params,
d_com);
freeReadWriteSets(blocks, threads);
hipMemcpy(finavalu, d_finavalu, d_finavalu_mem, hipMemcpyDeviceToHost);
hipMemcpy(com, d_com, d_com_mem, hipMemcpyDeviceToHost);
//=====================================================================
// FINAL KERNEL
//=====================================================================
initvalu_offset_ecc = 0; // 46 points
initvalu_offset_Dyad = 46; // 15 points
initvalu_offset_SL = 61; // 15 points
initvalu_offset_Cyt = 76; // 15 points
kernel_fin( initvalu,
initvalu_offset_ecc,
initvalu_offset_Dyad,
initvalu_offset_SL,
initvalu_offset_Cyt,
parameter,
finavalu,
com[0],
com[1],
com[2]);
//=====================================================================
// COMPENSATION FOR NANs and INFs
//=====================================================================
for(i=0; i<EQUATIONS; i++){
if (isnan(finavalu[i])){
finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
}
else if (isinf(finavalu[i])){
finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
}
}
}
| e9e87fc90dd8bd0080baacc97846f79878b37047.cu | #include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
//=====================================================================
// MAIN FUNCTION
//=====================================================================
void master(fp timeinst,
fp* initvalu,
fp* parameter,
fp* finavalu,
fp* com,
fp* d_initvalu,
fp* d_finavalu,
fp* d_params,
fp* d_com){
//=====================================================================
// VARIABLES
//=====================================================================
// counters
int i;
// offset pointers
int initvalu_offset_ecc; // 46 points
int initvalu_offset_Dyad; // 15 points
int initvalu_offset_SL; // 15 points
int initvalu_offset_Cyt; // 15 points
// cuda
dim3 threads;
dim3 blocks;
//=====================================================================
// execute ECC&CAM kernel - it runs ECC and CAMs in parallel
//=====================================================================
int d_initvalu_mem;
d_initvalu_mem = EQUATIONS * sizeof(fp);
int d_finavalu_mem;
d_finavalu_mem = EQUATIONS * sizeof(fp);
int d_params_mem;
d_params_mem = PARAMETERS * sizeof(fp);
int d_com_mem;
d_com_mem = 3 * sizeof(fp);
cudaMemcpy(d_initvalu, initvalu, d_initvalu_mem, cudaMemcpyHostToDevice);
cudaMemcpy(d_params, parameter, d_params_mem, cudaMemcpyHostToDevice);
threads.x = NUMBER_THREADS;
threads.y = 1;
blocks.x = 2;
blocks.y = 1;
allocateReadWriteSets(blocks, threads);
kernel<<<blocks, threads>>>( timeinst,
d_initvalu,
d_finavalu,
d_params,
d_com);
freeReadWriteSets(blocks, threads);
cudaMemcpy(finavalu, d_finavalu, d_finavalu_mem, cudaMemcpyDeviceToHost);
cudaMemcpy(com, d_com, d_com_mem, cudaMemcpyDeviceToHost);
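// both the derivative vector and the 3 communication values computed on the device are
// copied back here because the remaining kernel_fin() stage below runs on the host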
//=====================================================================
// FINAL KERNEL
//=====================================================================
initvalu_offset_ecc = 0; // 46 points
initvalu_offset_Dyad = 46; // 15 points
initvalu_offset_SL = 61; // 15 points
initvalu_offset_Cyt = 76; // 15 points
kernel_fin( initvalu,
initvalu_offset_ecc,
initvalu_offset_Dyad,
initvalu_offset_SL,
initvalu_offset_Cyt,
parameter,
finavalu,
com[0],
com[1],
com[2]);
//=====================================================================
// COMPENSATION FOR NANs and INFs
//=====================================================================
for(i=0; i<EQUATIONS; i++){
if (isnan(finavalu[i])){
finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
}
else if (isinf(finavalu[i])){
finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
}
}
}
|
70174b09f0cc435befd84ca2098708f9809a16d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma region License
/*
* jimsh - An interactive shell for Jim
*
* Copyright 2005 Salvatore Sanfilippo <[email protected]>
* Copyright 2009 Steve Bennett <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE JIM TCL PROJECT ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* JIM TCL PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of the Jim Tcl Project.
*/
#pragma endregion
#include <cuda_runtimecu.h>
#include <sentinel.h>
#include <stdiocu.h>
#include <stdlibcu.h>
#include <stringcu.h>
#include <jim.h>
#include <jimautoconf.h>
#pragma region Name
__device__ static void JimSetArgv(Jim_Interp *interp, int argc, char *const argv[]) {
Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0);
for (int n = 0; n < argc; n++)
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, argv[n], -1));
Jim_SetVariableStr(interp, "argv", listObj);
Jim_SetVariableStr(interp, "argc", Jim_NewIntObj(interp, argc));
}
__device__ static void JimPrintErrorMessage(Jim_Interp *interp) {
Jim_MakeErrorMessage(interp);
printf("%s\n", Jim_String(Jim_GetResult(interp)));
}
// From initjimsh.tcl
extern __device__ int Jim_initjimshInit(Jim_Interp *interp);
#pragma endregion
// SAMPLE COMMAND
#if 0
__device__ int SampleCommand(ClientData clientData, Jim_Interp *interp, int argc, Jim_Obj *const argv[]) {
if (argc != 2) {
Jim_WrongNumArgs(interp, 1, argv, "msg");
return JIM_ERROR;
}
printf("%s\n", Jim_String(argv[1]));
return JIM_OK;
}
#endif
#pragma region Startup + Shutdown
struct PrimaryData {
Jim_Interp *interp;
int retcode;
};
struct PrimaryData h_dataP;
// MAIN-INIT
#if __HIPCC__
__device__ struct PrimaryData d_dataP;
void D_DATAP() { cudaErrorCheck(hipMemcpyToSymbol(d_dataP, &h_dataP, sizeof(h_dataP))); }
void H_DATAP() { cudaErrorCheck(hipMemcpyFromSymbol(&h_dataP, d_dataP, sizeof(h_dataP))); }
__global__ void g_MainInit(int argc, char *const argv[]);
static int MainInit(int argc, char *const argv[]) {
memset(&h_dataP, 0, sizeof(h_dataP));
//cudaErrorCheck(hipSetDeviceFlags(hipDeviceMapHost | hipDeviceLmemResizeToMax));
cudaErrorCheck(hipSetDevice(gpuGetMaxGflopsDevice()));
cudaErrorCheck(hipDeviceSetLimit(hipLimitStackSize, 1024 * 5));
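// assumption based on the limit above: the default device stack is too small for the
// recursive Jim evaluator, so it is raised to 5 KB per thread; sentinelServerInitialize()
// below appears to start the host-side service that proxies device stdio/file calls
// (see the sentinel.h / stdiocu.h includes)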
sentinelServerInitialize();
//
char **d_argv = cudaDeviceTransferStringArray(argc, argv);
D_DATAP();hipLaunchKernelGGL(( g_MainInit), dim3(1),dim3(1), 0, 0, argc, d_argv); cudaErrorCheck(hipDeviceSynchronize()); H_DATAP();
hipFree(d_argv);
return h_dataP.retcode;
}
#define _dataP d_dataP
__global__ void g_MainInit(int argc, char *const argv[]) {
#else
#define _dataP h_dataP
static int MainInit(int argc, char *const argv[]) {
memset(&h_dataP, 0, sizeof(h_dataP));
#endif
// Create and initialize the interpreter
Jim_Interp *interp = _dataP.interp = Jim_CreateInterp();
Jim_RegisterCoreCommands(interp);
// SAMPLE COMMAND
#if 0
Jim_CreateCommand(interp, "sample", SampleCommand, nullptr, nullptr);
#endif
// Register static extensions
if (Jim_InitStaticExtensions(interp) != JIM_OK)
JimPrintErrorMessage(interp);
//
Jim_SetVariableStrWithStr(interp, "jim::argv0", argv[0]);
Jim_SetVariableStrWithStr(interp, JIM_INTERACTIVE, argc == 1 ? "1" : "0");
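// argc == 1 means no script was given: the interpreter is marked interactive here and the
// actual prompt loop is started later from main() via Jim_InteractivePrompt()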
int retcode = Jim_initjimshInit(interp);
if (argc == 1) {
if (retcode == JIM_ERROR)
JimPrintErrorMessage(interp);
if (retcode != JIM_EXIT)
JimSetArgv(interp, 0, NULL);
}
else {
if (argc > 2 && !strcmp(argv[1], "-e")) {
JimSetArgv(interp, argc - 3, argv + 3);
retcode = Jim_Eval(interp, argv[2]);
if (retcode != JIM_ERROR)
printf("%s\n", Jim_String(Jim_GetResult(interp)));
}
else {
Jim_SetVariableStr(interp, "argv0", Jim_NewStringObj(interp, argv[1], -1));
JimSetArgv(interp, argc - 2, argv + 2);
retcode = Jim_EvalFile(interp, argv[1]);
}
if (retcode == JIM_ERROR)
JimPrintErrorMessage(interp);
}
#if __HIPCC__
_dataP.retcode = retcode;
#else
return retcode;
#endif
}
// MAIN-SHUTDOWN
#if __HIPCC__
__global__ void g_MainShutdown(int retcode);
static int MainShutdown(int retcode) {
D_DATAP();hipLaunchKernelGGL(( g_MainShutdown), dim3(1),dim3(1), 0, 0, retcode); cudaErrorCheck(hipDeviceSynchronize()); H_DATAP();
hipDeviceReset();
sentinelServerShutdown();
return h_dataP.retcode;
}
__global__ void g_MainShutdown(int retcode) {
#else
static int MainShutdown(int retcode) {
#endif
Jim_Interp *interp = _dataP.interp;
if (retcode == JIM_EXIT)
retcode = Jim_GetExitCode(interp);
Jim_FreeInterp(interp);
#if __HIPCC__
_dataP.retcode = retcode;
#else
return retcode;
#endif
}
#pragma endregion
int main(int argc, char *const argv[]) {
if (argc > 1 && !strcmp(argv[1], "--version")) {
printf("%d.%d\n", JIM_VERSION / 100, JIM_VERSION % 100);
return 0;
}
int retcode = MainInit(argc, argv);
if (argc == 1 && retcode != JIM_EXIT)
retcode = Jim_InteractivePrompt(h_dataP.interp);
retcode = MainShutdown(retcode);
if (retcode == JIM_ERROR)
retcode = 1;
else
retcode = 0;
return retcode;
}
| 70174b09f0cc435befd84ca2098708f9809a16d2.cu | #pragma region License
/*
* jimsh - An interactive shell for Jim
*
* Copyright 2005 Salvatore Sanfilippo <[email protected]>
* Copyright 2009 Steve Bennett <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE JIM TCL PROJECT ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* JIM TCL PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of the Jim Tcl Project.
*/
#pragma endregion
#include <cuda_runtimecu.h>
#include <sentinel.h>
#include <stdiocu.h>
#include <stdlibcu.h>
#include <stringcu.h>
#include <jim.h>
#include <jimautoconf.h>
#pragma region Name
__device__ static void JimSetArgv(Jim_Interp *interp, int argc, char *const argv[]) {
Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0);
for (int n = 0; n < argc; n++)
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, argv[n], -1));
Jim_SetVariableStr(interp, "argv", listObj);
Jim_SetVariableStr(interp, "argc", Jim_NewIntObj(interp, argc));
}
__device__ static void JimPrintErrorMessage(Jim_Interp *interp) {
Jim_MakeErrorMessage(interp);
printf("%s\n", Jim_String(Jim_GetResult(interp)));
}
// From initjimsh.tcl
extern __device__ int Jim_initjimshInit(Jim_Interp *interp);
#pragma endregion
// SAMPLE COMMAND
#if 0
__device__ int SampleCommand(ClientData clientData, Jim_Interp *interp, int argc, Jim_Obj *const argv[]) {
if (argc != 2) {
Jim_WrongNumArgs(interp, 1, argv, "msg");
return JIM_ERROR;
}
printf("%s\n", Jim_String(argv[1]));
return JIM_OK;
}
#endif
#pragma region Startup + Shutdown
struct PrimaryData {
Jim_Interp *interp;
int retcode;
};
struct PrimaryData h_dataP;
// MAIN-INIT
#if __CUDACC__
__device__ struct PrimaryData d_dataP;
void D_DATAP() { cudaErrorCheck(cudaMemcpyToSymbol(d_dataP, &h_dataP, sizeof(h_dataP))); }
void H_DATAP() { cudaErrorCheck(cudaMemcpyFromSymbol(&h_dataP, d_dataP, sizeof(h_dataP))); }
__global__ void g_MainInit(int argc, char *const argv[]);
static int MainInit(int argc, char *const argv[]) {
memset(&h_dataP, 0, sizeof(h_dataP));
//cudaErrorCheck(cudaSetDeviceFlags(cudaDeviceMapHost | cudaDeviceLmemResizeToMax));
cudaErrorCheck(cudaSetDevice(gpuGetMaxGflopsDevice()));
cudaErrorCheck(cudaDeviceSetLimit(cudaLimitStackSize, 1024 * 5));
sentinelServerInitialize();
//
char **d_argv = cudaDeviceTransferStringArray(argc, argv);
D_DATAP(); g_MainInit<<<1,1>>>(argc, d_argv); cudaErrorCheck(cudaDeviceSynchronize()); H_DATAP();
cudaFree(d_argv);
return h_dataP.retcode;
}
#define _dataP d_dataP
__global__ void g_MainInit(int argc, char *const argv[]) {
#else
#define _dataP h_dataP
static int MainInit(int argc, char *const argv[]) {
memset(&h_dataP, 0, sizeof(h_dataP));
#endif
// Create and initialize the interpreter
Jim_Interp *interp = _dataP.interp = Jim_CreateInterp();
Jim_RegisterCoreCommands(interp);
// SAMPLE COMMAND
#if 0
Jim_CreateCommand(interp, "sample", SampleCommand, nullptr, nullptr);
#endif
// Register static extensions
if (Jim_InitStaticExtensions(interp) != JIM_OK)
JimPrintErrorMessage(interp);
//
Jim_SetVariableStrWithStr(interp, "jim::argv0", argv[0]);
Jim_SetVariableStrWithStr(interp, JIM_INTERACTIVE, argc == 1 ? "1" : "0");
int retcode = Jim_initjimshInit(interp);
if (argc == 1) {
if (retcode == JIM_ERROR)
JimPrintErrorMessage(interp);
if (retcode != JIM_EXIT)
JimSetArgv(interp, 0, NULL);
}
else {
if (argc > 2 && !strcmp(argv[1], "-e")) {
JimSetArgv(interp, argc - 3, argv + 3);
retcode = Jim_Eval(interp, argv[2]);
if (retcode != JIM_ERROR)
printf("%s\n", Jim_String(Jim_GetResult(interp)));
}
else {
Jim_SetVariableStr(interp, "argv0", Jim_NewStringObj(interp, argv[1], -1));
JimSetArgv(interp, argc - 2, argv + 2);
retcode = Jim_EvalFile(interp, argv[1]);
}
if (retcode == JIM_ERROR)
JimPrintErrorMessage(interp);
}
#if __CUDACC__
_dataP.retcode = retcode;
#else
return retcode;
#endif
}
// MAIN-SHUTDOWN
#if __CUDACC__
__global__ void g_MainShutdown(int retcode);
static int MainShutdown(int retcode) {
D_DATAP(); g_MainShutdown<<<1,1>>>(retcode); cudaErrorCheck(cudaDeviceSynchronize()); H_DATAP();
cudaDeviceReset();
sentinelServerShutdown();
return h_dataP.retcode;
}
__global__ void g_MainShutdown(int retcode) {
#else
static int MainShutdown(int retcode) {
#endif
Jim_Interp *interp = _dataP.interp;
if (retcode == JIM_EXIT)
retcode = Jim_GetExitCode(interp);
Jim_FreeInterp(interp);
#if __CUDACC__
_dataP.retcode = retcode;
#else
return retcode;
#endif
}
#pragma endregion
int main(int argc, char *const argv[]) {
if (argc > 1 && !strcmp(argv[1], "--version")) {
printf("%d.%d\n", JIM_VERSION / 100, JIM_VERSION % 100);
return 0;
}
int retcode = MainInit(argc, argv);
if (argc == 1 && retcode != JIM_EXIT)
retcode = Jim_InteractivePrompt(h_dataP.interp);
retcode = MainShutdown(retcode);
if (retcode == JIM_ERROR)
retcode = 1;
else
retcode = 0;
return retcode;
}
|
eec74a925494f9457e620f79f4639bae57d1f619.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<Z*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
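// grid-stride loop: each thread handles elements tid, tid + step, tid + 2*step, ...
// so a single launch of any size covers the full output length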
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
Z mIdx(0);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape, length)];
if (mVal < val)
mIdx = static_cast<Z>(e);
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape, length)] = mIdx;
}
}
template <typename T, typename Z>
static void mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMaxIndex");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
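// replicatePointer presumably copies these host-side arrays of device pointers into device
// memory so the kernel can dereference inArrs[i] / inShapes[i] (assumption based on the name)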
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMaxIndex_<T,Z>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INDEXING_TYPES);
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape, length)];
if (mVal < val)
mVal = val;
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape, length)] = mVal;
}
}
template<typename T>
static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMax");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMax_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
}
//////////////////////////////////////////////////////////////////////////
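// mergeAvg kernel: element-wise average over all input arrays (sum divided by numArrays).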
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape, length)];
}
output[shape::getIndexOffset(e, outputShape, length)] = sum / numArrays;
}
}
template<typename T>
static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAvg");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAvg_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
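// mergeAdd kernel: element-wise sum over all input arrays.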
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape, length)];
}
output[shape::getIndexOffset(e, outputShape, length)] = sum;
}
}
template<typename T>
static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAdd");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAdd_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), NUMERIC_TYPES);
void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), NUMERIC_TYPES);
}
}
}
} | eec74a925494f9457e620f79f4639bae57d1f619.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
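// mergeMaxIndex kernel: for every element position e, scan all input arrays and record the
// index of the array holding the maximum value at that position (grid-stride loop over length).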
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<Z*>(voutput);
const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
Z mIdx(0);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
                auto val = x[shape::getIndexOffset(e, xShape, length)];
                if (mVal < val) {
                    mIdx = static_cast<Z>(i);
                    mVal = val;
                }
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape, length)] = mIdx;
}
}
template <typename T, typename Z>
static void mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMaxIndex");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeMaxIndex_<T,Z><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INDEXING_TYPES);
}
//////////////////////////////////////////////////////////////////////////
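// mergeMax kernel: element-wise maximum over all input arrays (grid-stride loop over length).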
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
                auto val = x[shape::getIndexOffset(e, xShape, length)];
if (mVal < val)
mVal = val;
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape, length)] = mVal;
}
}
template<typename T>
static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMax");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeMax_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
}
//////////////////////////////////////////////////////////////////////////
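// mergeAvg kernel: element-wise average over all input arrays (sum divided by numArrays).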
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape, length)];
}
output[shape::getIndexOffset(e, outputShape, length)] = sum / numArrays;
}
}
template<typename T>
static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAvg");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeAvg_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
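// mergeAdd kernel: element-wise sum over all input arrays.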
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape, length)];
}
output[shape::getIndexOffset(e, outputShape, length)] = sum;
}
}
template<typename T>
static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAdd");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeAdd_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), NUMERIC_TYPES);
void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), NUMERIC_TYPES);
}
}
}
} |
d848a484d08cb632ae39a957a6f584cf6e42a218.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
texture<float,1,hipReadModeElementType> tex_vec;
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
hipHostMalloc(newA_ptr, paddedSize * sizeof(float));
hipHostMalloc(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];//tex1Dfetch(tex_vec,col);
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
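// CSR SpMV kernel: one 32-thread warp handles one matrix row. Each lane accumulates a strided
// partial sum over the row's non-zeros (reading the dense vector through the 1D texture), the
// partial sums are combined with a shared-memory warp reduction, and lane 0 writes the row result.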
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1)
rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x];
__syncthreads();
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * tex1Dfetch(tex_vec,col);//vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
hipSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * spmv_numRows / 50; // 2% of entries will be non-zero
float maxval = 200.0;
hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
hipBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
| d848a484d08cb632ae39a957a6f584cf6e42a218.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
texture<float,1,cudaReadModeElementType> tex_vec;
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
cudaMallocHost(newA_ptr, paddedSize * sizeof(float));
cudaMallocHost(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];//tex1Dfetch(tex_vec,col);
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
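// CSR SpMV kernel: one 32-thread warp handles one matrix row. Each lane accumulates a strided
// partial sum over the row's non-zeros (reading the dense vector through the 1D texture), the
// partial sums are combined with a shared-memory warp reduction, and lane 0 writes the row result.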
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1)
rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x];
__syncthreads();
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * tex1Dfetch(tex_vec,col);//vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
cudaSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * spmv_numRows / 50; // 2% of entries will be non-zero
float maxval = 200.0;
cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
7a458ad6d030805a509ad2dcada75bf9351abc79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file test_warp_tools.cu
* \brief CUDA kernels for testing warp-level primitives.
*/
#include "test_warp_tools.cuh"
#include "hoomd/WarpTools.cuh"
#define BLOCK_SIZE 32
//! Performs an iterative warp reduction on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_reduce Output of the reduction at each step.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param reduce_idx Indexer for saving intermediate results of reduction.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries
* per row. This sub-warp group then iterates through the data in the row, performing a reduction at each iteration.
* The result of the reduction is saved into \a d_reduce for each iteration. The total sum is also accumulated
* into \a d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpReduce. The reason for this is to
* emulate a use-case in HOOMD, namely the force accumulation using multiple threads per particle.
*/
template<int tpp>
__global__ void warp_reduce_kernel(const int* d_data,
int* d_reduce,
int* d_sum,
const unsigned int N,
const unsigned int width,
const Index2D reduce_idx)
{
// thread id in the global grid
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// row of data that this thread operates on
const unsigned int idx = tid / tpp;
// index of thread within the sub warp
const unsigned int cta_idx = threadIdx.x % tpp;
if (idx >= N) return;
int sum(0), cntr(0);
unsigned int offset = cta_idx;
bool done = false;
while (!done)
{
// load in data
int thread_data;
if (offset < width)
{
thread_data = d_data[idx * width + offset];
}
else
{
thread_data = 0;
done = true;
}
offset += tpp;
// only scan if sub warp still has work to do
done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0);
if (!done)
{
// scan the thread data
int sum_iter = hoomd::detail::WarpReduce<int,tpp>().Sum(thread_data);
// save reduce result for this iteration
if (cta_idx == 0)
d_reduce[reduce_idx(idx,cntr)] = sum_iter;
// accumulate total sum
sum += sum_iter;
++cntr;
}
}
// thread 0 writes out accumulated sum
if (cta_idx == 0)
{
d_sum[idx] = sum;
}
}
// Dispatch for warp reduction based on requested threads per particle.
/*!
* \param params Reduction parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 32), and only
* executes the kernel for the number of threads that is equal to the value specified in \a params.
*/
template<int tpp>
void warp_reduce_launcher(const reduce_params& params)
{
if (tpp == params.tpp)
{
dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE);
hipLaunchKernelGGL(( warp_reduce_kernel<tpp>), dim3(grid), dim3(BLOCK_SIZE), 0, 0, params.data, params.reduce, params.sum, params.N, params.width, params.reduce_idx);
}
else
{
warp_reduce_launcher<tpp/2>(params);
}
}
//! Terminates the recursive template.
template<>
void warp_reduce_launcher<0>(const reduce_params& params)
{
}
/*!
 * \param params Reduction parameters.
 *
 * The reduction results are first memset to zero.
*/
void warp_reduce(const reduce_params& params)
{
hipMemset(params.reduce, 0, params.reduce_idx.getNumElements() * sizeof(int));
hipMemset(params.sum, 0, params.N * sizeof(int));
warp_reduce_launcher<32>(params);
}
//! Performs an iterative warp scan on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_scan Output of the scan at each step of sum.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param scan_idx Indexer for saving intermediate results of scan.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries
* per row. This sub-warp group then iterates through the data in the row, performing an exclusive sum at each iteration.
* The result of the scan is saved into \a d_scan for each thread along with the aggregate at each iteration. The total
* sum is also accumulated into \a d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpScan. The reason for this is to
* emulate a use-case in HOOMD, namely the neighbor list generation using multiple threads per particle.
*/
template<int tpp>
__global__ void warp_scan_kernel(const int* d_data,
int* d_scan,
int* d_sum,
const unsigned int N,
const unsigned int width,
const Index3D scan_idx)
{
// thread id in the global grid
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// row of data that this thread operates on
const unsigned int idx = tid / tpp;
// index of thread within the sub warp
const unsigned int cta_idx = threadIdx.x % tpp;
if (idx >= N) return;
int sum(0), cntr(0);
unsigned int offset = cta_idx;
bool done = false;
while (!done)
{
// load in data
int thread_data;
if (offset < width)
{
thread_data = d_data[idx * width + offset];
}
else
{
thread_data = 0;
done = true;
}
offset += tpp;
// only scan if sub warp still has work to do
done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0);
if (!done)
{
// scan the thread data
int sum_iter(0);
hoomd::detail::WarpScan<int,tpp>().ExclusiveSum(thread_data, thread_data, sum_iter);
// save scan result for this iteration
d_scan[scan_idx(idx,cta_idx,cntr)] = thread_data;
if (cta_idx == 0)
d_scan[scan_idx(idx,tpp,cntr)] = sum_iter;
// accumulate total sum
sum += sum_iter;
++cntr;
}
}
// thread 0 writes out accumulated sum
if (cta_idx == 0)
{
d_sum[idx] = sum;
}
}
// Dispatch for warp scan based on requested threads per particle.
/*!
* \param params Scan parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 32), and only
* executes the kernel for the number of threads that is equal to the value specified in \a params.
*/
template<int tpp>
void warp_scan_launcher(const scan_params& params)
{
if (tpp == params.tpp)
{
dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE);
hipLaunchKernelGGL(( warp_scan_kernel<tpp>), dim3(grid), dim3(BLOCK_SIZE), 0, 0, params.data, params.scan, params.sum, params.N, params.width, params.scan_idx);
}
else
{
warp_scan_launcher<tpp/2>(params);
}
}
//! Terminates the recursive template.
template<>
void warp_scan_launcher<0>(const scan_params& params)
{
}
/*!
 * \param params Scan parameters.
*
* The scan results are first memset to zero.
*/
void warp_scan(const scan_params& params)
{
hipMemset(params.scan, 0, params.scan_idx.getNumElements() * sizeof(int));
hipMemset(params.sum, 0, params.N * sizeof(int));
warp_scan_launcher<32>(params);
}
| 7a458ad6d030805a509ad2dcada75bf9351abc79.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file test_warp_tools.cu
* \brief CUDA kernels for testing warp-level primitives.
*/
#include "test_warp_tools.cuh"
#include "hoomd/WarpTools.cuh"
#define BLOCK_SIZE 32
//! Performs an iterative warp reduction on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_reduce Output of the reduction at each step.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param reduce_idx Indexer for saving intermediate results of reduction.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries
* per row. This sub-warp group then iterates through the data in the row, performing a reduction at each iteration.
* The result of the reduction is saved into \a d_reduce for each iteration. The total sum is also accumulated
* into \a d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpReduce. The reason for this is to
* emulate a use-case in HOOMD, namely the force accumulation using multiple threads per particle.
*/
template<int tpp>
__global__ void warp_reduce_kernel(const int* d_data,
int* d_reduce,
int* d_sum,
const unsigned int N,
const unsigned int width,
const Index2D reduce_idx)
{
// thread id in the global grid
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// row of data that this thread operates on
const unsigned int idx = tid / tpp;
// index of thread within the sub warp
const unsigned int cta_idx = threadIdx.x % tpp;
if (idx >= N) return;
int sum(0), cntr(0);
unsigned int offset = cta_idx;
bool done = false;
while (!done)
{
// load in data
int thread_data;
if (offset < width)
{
thread_data = d_data[idx * width + offset];
}
else
{
thread_data = 0;
done = true;
}
offset += tpp;
// only scan if sub warp still has work to do
done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0);
if (!done)
{
// scan the thread data
int sum_iter = hoomd::detail::WarpReduce<int,tpp>().Sum(thread_data);
// save reduce result for this iteration
if (cta_idx == 0)
d_reduce[reduce_idx(idx,cntr)] = sum_iter;
// accumulate total sum
sum += sum_iter;
++cntr;
}
}
// thread 0 writes out accumulated sum
if (cta_idx == 0)
{
d_sum[idx] = sum;
}
}
// Dispatch for warp reduction based on requested threads per particle.
/*!
* \param params Reduction parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 32), and only
* executes the kernel for the number of threads that is equal to the value specified in \a params.
*/
template<int tpp>
void warp_reduce_launcher(const reduce_params& params)
{
if (tpp == params.tpp)
{
dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE);
warp_reduce_kernel<tpp><<<grid, BLOCK_SIZE>>>(params.data, params.reduce, params.sum, params.N, params.width, params.reduce_idx);
}
else
{
warp_reduce_launcher<tpp/2>(params);
}
}
//! Terminates the recursive template.
template<>
void warp_reduce_launcher<0>(const reduce_params& params)
{
}
/*!
 * \param params Reduction parameters.
 *
 * The reduction results are first memset to zero.
*/
void warp_reduce(const reduce_params& params)
{
cudaMemset(params.reduce, 0, params.reduce_idx.getNumElements() * sizeof(int));
cudaMemset(params.sum, 0, params.N * sizeof(int));
warp_reduce_launcher<32>(params);
}
//! Performs an iterative warp scan on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_scan Output of the scan at each step of sum.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param scan_idx Indexer for saving intermediate results of scan.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries
* per row. This sub-warp group then iterates through the data in the row, performing an exclusive sum at each iteration.
* The result of the scan is saved into \a d_scan for each thread along with the aggregate at each iteration. The total
* sum is also accumulated into \a d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpScan. The reason for this is to
* emulate a use-case in HOOMD, namely the neighbor list generation using multiple threads per particle.
*/
template<int tpp>
__global__ void warp_scan_kernel(const int* d_data,
int* d_scan,
int* d_sum,
const unsigned int N,
const unsigned int width,
const Index3D scan_idx)
{
// thread id in the global grid
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// row of data that this thread operates on
const unsigned int idx = tid / tpp;
// index of thread within the sub warp
const unsigned int cta_idx = threadIdx.x % tpp;
if (idx >= N) return;
int sum(0), cntr(0);
unsigned int offset = cta_idx;
bool done = false;
while (!done)
{
// load in data
int thread_data;
if (offset < width)
{
thread_data = d_data[idx * width + offset];
}
else
{
thread_data = 0;
done = true;
}
offset += tpp;
// only scan if sub warp still has work to do
done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0);
if (!done)
{
// scan the thread data
int sum_iter(0);
hoomd::detail::WarpScan<int,tpp>().ExclusiveSum(thread_data, thread_data, sum_iter);
// save scan result for this iteration
d_scan[scan_idx(idx,cta_idx,cntr)] = thread_data;
if (cta_idx == 0)
d_scan[scan_idx(idx,tpp,cntr)] = sum_iter;
// accumulate total sum
sum += sum_iter;
++cntr;
}
}
// thread 0 writes out accumulated sum
if (cta_idx == 0)
{
d_sum[idx] = sum;
}
}
// Dispatch for warp scan based on requested threads per particle.
/*!
* \param params Scan parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 32), and only
* executes the kernel for the number of threads that is equal to the value specified in \a params.
*/
template<int tpp>
void warp_scan_launcher(const scan_params& params)
{
if (tpp == params.tpp)
{
dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE);
warp_scan_kernel<tpp><<<grid, BLOCK_SIZE>>>(params.data, params.scan, params.sum, params.N, params.width, params.scan_idx);
}
else
{
warp_scan_launcher<tpp/2>(params);
}
}
//! Terminates the recursive template.
template<>
void warp_scan_launcher<0>(const scan_params& params)
{
}
/*!
* \params Scan parameters.
*
* The scan results are first memset to zero.
*/
void warp_scan(const scan_params& params)
{
cudaMemset(params.scan, 0, params.scan_idx.getNumElements() * sizeof(int));
cudaMemset(params.sum, 0, params.N * sizeof(int));
warp_scan_launcher<32>(params);
}
|
8e36e286027f2388a4a06292d5bd3fc184442c3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <memory>
#include <utility>
#include <vector>
#include <map>
#include <functional>
#include <algorithm>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "boost/iterator/counting_iterator.hpp"
#include "modules/perception/camera/lib/obstacle/detector/yolo/region_output.h"
#include "modules/perception/base/object_types.h"
#include "modules/perception/camera/lib/obstacle/detector/yolo/object_maintainer.h"
namespace apollo {
namespace perception {
namespace camera {
__host__ __device__
float sigmoid_gpu(float x) {
return 1.0 / (1.0 + exp(-x));
}
__host__ __device__
float bbox_size_gpu(const float *bbox,
const bool normalized) {
if (bbox[2] <= bbox[0] || bbox[3] <= bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return 0.f; // NOLINT
} else {
const float width = bbox[2] - bbox[0];
const float height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
__host__ __device__
float jaccard_overlap_gpu(const float *bbox1,
const float *bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return float(0.); // NOLINT
} else {
const float inter_xmin = max(bbox1[0], bbox2[0]);
const float inter_ymin = max(bbox1[1], bbox2[1]);
const float inter_xmax = min(bbox1[2], bbox2[2]);
const float inter_ymax = min(bbox1[3], bbox2[3]);
const float inter_width = inter_xmax - inter_xmin;
const float inter_height = inter_ymax - inter_ymin;
const float inter_size = inter_width * inter_height;
const float bbox1_size = bbox_size_gpu(bbox1, true);
const float bbox2_size = bbox_size_gpu(bbox2, true);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
__global__ void get_object_kernel(int n,
const float *loc_data,
const float *obj_data,
const float *cls_data,
const float *ori_data,
const float *dim_data,
const float *lof_data,
const float *lor_data,
const float *area_id_data,
const float *visible_ratio_data,
const float *cut_off_ratio_data,
const float *brvis_data,
const float *brswt_data,
const float *ltvis_data,
const float *ltswt_data,
const float *rtvis_data,
const float *rtswt_data,
const float *anchor_data,
const float *expand_data,
int width,
int height,
int num_anchors,
int num_classes,
float confidence_threshold,
float light_vis_conf_threshold,
float light_swt_conf_threshold,
bool with_box3d,
bool with_frbox,
bool with_lights,
bool with_ratios,
bool multi_scale,
int num_areas,
float *res_box_data,
float *res_cls_data,
int res_cls_offset,
int all_scales_num_candidates) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);
i += blockDim.x * gridDim.x) {
int box_block = kBoxBlockSize;
int idx = i;
int c = idx % num_anchors;
idx = idx / num_anchors;
int w = idx % width;
idx = idx / width;
int h = idx;
int loc_index = (h * width + w) * num_anchors + c;
int offset_loc = loc_index * 4;
int offset_cls = loc_index * num_classes;
float scale = obj_data[loc_index];
float cx = (w + sigmoid_gpu(loc_data[offset_loc + 0])) / width;
float cy = (h + sigmoid_gpu(loc_data[offset_loc + 1])) / height;
float hw =
exp(max(minExpPower, min(loc_data[offset_loc + 2], maxExpPower))) *
anchor_data[2 * c] / width * 0.5;
float hh =
exp(max(minExpPower, min(loc_data[offset_loc + 3], maxExpPower))) *
anchor_data[2 * c + 1] / height * 0.5;
float max_prob = 0.f;
int max_index = 0;
for (int k = 0; k < num_classes; ++k) {
float prob = cls_data[offset_cls + k] * scale;
res_cls_data[k * all_scales_num_candidates
+ res_cls_offset + i] = prob;
if (prob > max_prob) {
max_prob = prob;
max_index = k;
}
}
res_cls_data[num_classes * all_scales_num_candidates
+ res_cls_offset + i] = max_prob;
auto &&dst_ptr = res_box_data + i * box_block;
hw += expand_data[max_index];
dst_ptr[0] = cx - hw;
dst_ptr[1] = cy - hh;
dst_ptr[2] = cx + hw;
dst_ptr[3] = cy + hh;
if (with_box3d) {
int offset_ori = loc_index * 2;
dst_ptr[4] = atan2(ori_data[offset_ori + 1], ori_data[offset_ori]);
int offset_dim = loc_index * 3;
if (multi_scale){
offset_dim = loc_index * num_classes * 3 + max_index * 3;
}
dst_ptr[5] = dim_data[offset_dim + 0];
dst_ptr[6] = dim_data[offset_dim + 1];
dst_ptr[7] = dim_data[offset_dim + 2];
}
if (with_frbox) {
{
int offset_lof = loc_index * 4;
auto &&src_ptr = lof_data + offset_lof;
auto sb_x = src_ptr[0] * hw * 2 + cx;
auto sb_y = src_ptr[1] * hh * 2 + cy;
auto sb_hw = exp(src_ptr[2]) * hw;
auto sb_hh = exp(src_ptr[3]) * hh;
dst_ptr[8] = sb_x - sb_hw;
dst_ptr[9] = sb_y - sb_hh;
dst_ptr[10] = sb_x + sb_hw;
dst_ptr[11] = sb_y + sb_hh;
}
{
int offset_lor = loc_index * 4;
auto &&src_ptr = lor_data + offset_lor;
auto sb_x = src_ptr[0] * hw * 2 + cx;
auto sb_y = src_ptr[1] * hh * 2 + cy;
auto sb_hw = exp(src_ptr[2]) * hw;
auto sb_hh = exp(src_ptr[3]) * hh;
dst_ptr[12] = sb_x - sb_hw;
dst_ptr[13] = sb_y - sb_hh;
dst_ptr[14] = sb_x + sb_hw;
dst_ptr[15] = sb_y + sb_hh;
}
}
if (with_lights) {
dst_ptr[16] = sigmoid_gpu(brvis_data[loc_index]);
dst_ptr[17] = sigmoid_gpu(brswt_data[loc_index]);
dst_ptr[18] = sigmoid_gpu(ltvis_data[loc_index]);
dst_ptr[19] = sigmoid_gpu(ltswt_data[loc_index]);
dst_ptr[20] = sigmoid_gpu(rtvis_data[loc_index]);
dst_ptr[21] = sigmoid_gpu(rtswt_data[loc_index]);
dst_ptr[16] = dst_ptr[16] > light_vis_conf_threshold ? dst_ptr[16] : 0;
dst_ptr[18] = dst_ptr[18] > light_vis_conf_threshold ? dst_ptr[18] : 0;
dst_ptr[20] = dst_ptr[20] > light_vis_conf_threshold ? dst_ptr[20] : 0;
float swt_score = 0;
swt_score = dst_ptr[16] * dst_ptr[17];
dst_ptr[17] = swt_score > light_swt_conf_threshold ? swt_score : 0;
swt_score = dst_ptr[18] * dst_ptr[19];
dst_ptr[19] = swt_score > light_swt_conf_threshold ? swt_score : 0;
swt_score = dst_ptr[20] * dst_ptr[21];
dst_ptr[21] = swt_score > light_swt_conf_threshold ? swt_score : 0;
}
if (with_ratios) {
// 0~3: cos2, left, visa, visb
auto vis_pred = visible_ratio_data + loc_index * 4;
auto vis_ptr = dst_ptr + 22;
vis_ptr[0] = vis_ptr[1] = vis_ptr[2] = vis_ptr[3] = 0;
const float hi_th = 0.75;
const float lo_th = 1.f - hi_th;
if (vis_pred[2] >= hi_th && vis_pred[3] >= hi_th) { // 2 (1, 3)
vis_ptr[0] = vis_pred[0];
vis_ptr[1] = 1 - vis_pred[0];
} else if (vis_pred[2] <= lo_th && vis_pred[3] >= hi_th) { // 4 (3, 5)
vis_ptr[2] = vis_pred[0];
vis_ptr[1] = 1 - vis_pred[0];
} else if (vis_pred[2] <= lo_th && vis_pred[3] <= lo_th) { // 6 (5, 7)
vis_ptr[2] = vis_pred[0];
vis_ptr[3] = 1 - vis_pred[0];
} else if (vis_pred[2] >= hi_th && vis_pred[3] <= lo_th) { // 8 (7, 1)
vis_ptr[0] = vis_pred[0];
vis_ptr[3] = 1 - vis_pred[0];
} else {
vis_ptr[2] = vis_pred[0];
if (vis_pred[1] > 0.5) {
vis_ptr[1] = 1 - vis_pred[0];
} else {
vis_ptr[3] = 1 - vis_pred[0];
}
}
int offset_cut = loc_index * 4;
dst_ptr[26] = cut_off_ratio_data[offset_cut + 0];
dst_ptr[27] = cut_off_ratio_data[offset_cut + 1];
dst_ptr[28] = cut_off_ratio_data[offset_cut + 2];
dst_ptr[29] = cut_off_ratio_data[offset_cut + 3];
}
if (num_areas > 0) {
int offset_area_id = loc_index * num_areas;
int max_area_id = 0;
for (int area_id = 1; area_id < num_areas; ++area_id) {
if (area_id_data[offset_area_id + area_id] >
area_id_data[offset_area_id + max_area_id]) {
max_area_id = area_id;
}
}
dst_ptr[30] = max_area_id + 1;
dst_ptr[31] = area_id_data[offset_area_id + max_area_id];
}
}
}
__global__ void get_rois_kernel(int num_bboxes,
const float *loc_data,
const float *obj_data,
const float *anchor_data,
int width,
int height,
int num_anchors,
float confidence_threshold,
float *conf_data,
float *bbox_data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_bboxes) {
int offset_obj = idx;
int offset_loc = idx * 4;
int c = idx % num_anchors;
idx /= num_anchors;
int w = idx % width;
idx /= width;
int h = idx;
float cx = (w + sigmoid_gpu(loc_data[offset_loc + 0])) / width;
float cy = (h + sigmoid_gpu(loc_data[offset_loc + 1])) / height;
float hw =
exp(loc_data[offset_loc + 2]) * anchor_data[2 * c + 0] / width * 0.5;
float hh =
exp(loc_data[offset_loc + 3]) * anchor_data[2 * c + 1] / height * 0.5;
const float &conf = obj_data[offset_obj];
conf_data[offset_obj] = conf > confidence_threshold ? conf : 0;
auto &&curr_bbox = bbox_data + offset_loc;
curr_bbox[0] = cx - hw;
curr_bbox[1] = cy - hh;
curr_bbox[2] = cx + hw;
curr_bbox[3] = cy + hh;
}
}
__global__ void compute_overlapped_by_idx_kernel(
const int nthreads,
const float *bbox_data,
const int bbox_step,
const float overlap_threshold,
const int *idx,
const int num_idx,
bool *overlapped_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < (nthreads); index += blockDim.x * gridDim.x) {
const int j = index % num_idx;
const int i = index / num_idx;
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * bbox_step;
const int start_loc_j = idx[j] * bbox_step;
const float overlap = jaccard_overlap_gpu(bbox_data + start_loc_i,
bbox_data + start_loc_j);
overlapped_data[index] = overlap > overlap_threshold;
}
}
void compute_overlapped_by_idx_gpu(const int nthreads,
const float *bbox_data,
const int bbox_step,
const float overlap_threshold,
const int *idx,
const int num_idx,
bool *overlapped_data,
const hipStream_t &stream) {
// NOLINT_NEXT_LINE(whitespace/operators)
const int thread_size = 512;
int block_size = (nthreads + thread_size - 1) / thread_size;
  hipLaunchKernelGGL(( compute_overlapped_by_idx_kernel), dim3(block_size), dim3(thread_size), 0, stream,
      nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx,
      overlapped_data);
}
void apply_nms_gpu(const float *bbox_data,
const float *conf_data,
const std::vector<int> &origin_indices,
const int bbox_step,
const float confidence_threshold,
const int top_k,
const float nms_threshold,
std::vector<int> *indices,
base::Blob<bool> *overlapped,
base::Blob<int> *idx_sm,
const hipStream_t &stream) {
// Keep part of detections whose scores are higher than confidence threshold.
std::vector<int> idx;
std::vector<float> confidences;
for (auto i : origin_indices) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<float>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
int *idx_data = (idx_sm->mutable_cpu_data());
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
overlapped->Reshape(std::vector<int>{num_remain, num_remain});
bool *overlapped_data = (overlapped->mutable_gpu_data());
compute_overlapped_by_idx_gpu(overlapped->count(),
bbox_data,
bbox_step,
nms_threshold,
idx_sm->gpu_data(),
num_remain,
overlapped_data,
stream);
// Do non-maximum suppression based on overlapped results.
const bool *overlapped_results = (const bool *) overlapped->cpu_data();
std::vector<int> selected_indices;
apply_nms(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (size_t i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
void apply_nms(const bool *overlapped,
const int num,
std::vector<int> *indices) {
std::vector<int> index_vec(boost::counting_iterator<int>(0),
boost::counting_iterator<int>(num));
// Do nms.
indices->clear();
while (index_vec.size() != 0) {
// Get the current highest score box.
int best_idx = index_vec.front();
indices->push_back(best_idx);
// Erase the best box.
index_vec.erase(index_vec.begin());
for (std::vector<int>::iterator it = index_vec.begin();
it != index_vec.end();) {
int cur_idx = *it;
// Remove it if necessary
if (overlapped[best_idx * num + cur_idx]) {
it = index_vec.erase(it);
} else {
++it;
}
}
}
}
const float *get_gpu_data(bool flag, const base::Blob<float> &blob) {
return flag ? blob.gpu_data() : nullptr;
}
void get_objects_gpu(const YoloBlobs &yolo_blobs,
const hipStream_t &stream,
const std::vector<base::ObjectSubType> &types,
const NMSParam &nms,
const yolo::ModelParam &model_param,
float light_vis_conf_threshold,
float light_swt_conf_threshold,
base::Blob<bool> *overlapped,
base::Blob<int> *idx_sm,
std::vector<base::ObjectPtr> *objects) {
bool multi_scale = false;
if (yolo_blobs.det2_obj_blob){
multi_scale = true;
}
int num_classes = types.size();
int batch = yolo_blobs.det1_obj_blob->shape(0);
int num_anchor = yolo_blobs.anchor_blob->shape(2);
int num_anchor_per_scale = num_anchor;
if (multi_scale){
num_anchor_per_scale /= numScales;
}
CHECK_EQ(batch, 1) << "batch size should be 1!";
std::vector<int> height_vec, width_vec, num_candidates_vec;
height_vec.push_back(yolo_blobs.det1_obj_blob->shape(1));
width_vec.push_back(yolo_blobs.det1_obj_blob->shape(2));
if (multi_scale){
height_vec.push_back(yolo_blobs.det2_obj_blob->shape(1));
height_vec.push_back(yolo_blobs.det3_obj_blob->shape(1));
width_vec.push_back(yolo_blobs.det2_obj_blob->shape(2));
width_vec.push_back(yolo_blobs.det3_obj_blob->shape(2));
}
for (size_t i=0; i<height_vec.size(); i++){
num_candidates_vec.push_back(
height_vec[i] * width_vec[i] * num_anchor_per_scale);
}
const float* loc_data_vec[3] = {yolo_blobs.det1_loc_blob->gpu_data(),
yolo_blobs.det2_loc_blob? yolo_blobs.det2_loc_blob->gpu_data() : nullptr,
yolo_blobs.det3_loc_blob? yolo_blobs.det3_loc_blob->gpu_data() : nullptr};
const float* obj_data_vec[3] = {yolo_blobs.det1_obj_blob->gpu_data(),
yolo_blobs.det2_obj_blob? yolo_blobs.det2_obj_blob->gpu_data() : nullptr,
yolo_blobs.det3_obj_blob? yolo_blobs.det3_obj_blob->gpu_data() : nullptr};
const float* cls_data_vec[3] = {yolo_blobs.det1_cls_blob->gpu_data(),
yolo_blobs.det2_cls_blob? yolo_blobs.det2_cls_blob->gpu_data() : nullptr,
yolo_blobs.det3_cls_blob? yolo_blobs.det3_cls_blob->gpu_data() : nullptr};
const float* ori_data_vec[3] = {get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det1_ori_blob),
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det2_ori_blob) : nullptr,
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det3_ori_blob) : nullptr};
const float* dim_data_vec[3] = {get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det1_dim_blob),
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det2_dim_blob) : nullptr,
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det3_dim_blob) : nullptr};
//TODO[KaWai]: add 3 scale frbox data and light data.
const float *lof_data = get_gpu_data(
model_param.with_frbox(), *yolo_blobs.lof_blob);
const float *lor_data = get_gpu_data(
model_param.with_frbox(), *yolo_blobs.lor_blob);
const float *area_id_data = get_gpu_data(
model_param.num_areas() > 0, *yolo_blobs.area_id_blob);
const float *visible_ratio_data = get_gpu_data(
model_param.with_ratios(), *yolo_blobs.visible_ratio_blob);
const float *cut_off_ratio_data = get_gpu_data(
model_param.with_ratios(), *yolo_blobs.cut_off_ratio_blob);
const auto &with_lights = model_param.with_lights();
const float *brvis_data = get_gpu_data(with_lights, *yolo_blobs.brvis_blob);
const float *brswt_data = get_gpu_data(with_lights, *yolo_blobs.brswt_blob);
const float *ltvis_data = get_gpu_data(with_lights, *yolo_blobs.ltvis_blob);
const float *ltswt_data = get_gpu_data(with_lights, *yolo_blobs.ltswt_blob);
const float *rtvis_data = get_gpu_data(with_lights, *yolo_blobs.rtvis_blob);
const float *rtswt_data = get_gpu_data(with_lights, *yolo_blobs.rtswt_blob);
int all_scales_num_candidates = 0;
for (size_t i = 0; i < num_candidates_vec.size(); i++){
all_scales_num_candidates += num_candidates_vec[i];
}
yolo_blobs.res_box_blob->Reshape(
std::vector<int>{1, 1, all_scales_num_candidates, kBoxBlockSize});
yolo_blobs.res_cls_blob->Reshape(
std::vector<int>{1, 1, num_classes + 1, all_scales_num_candidates});
float *res_box_data = yolo_blobs.res_box_blob->mutable_gpu_data();
float *res_cls_data = yolo_blobs.res_cls_blob->mutable_gpu_data();
const int thread_size = 512;
//TODO[KaWai]: use different stream to process scales in parallel.
int num_candidates_offset = 0;
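// Decode every scale with get_object_kernel; each scale appends its
// candidates to the shared res_box/res_cls blobs at num_candidates_offset so
// the NMS below can treat all scales as a single candidate list.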
for (int i = 0; i < num_candidates_vec.size(); i++){
int block_size = (num_candidates_vec[i] + thread_size - 1) / thread_size;
const float *loc_data = loc_data_vec[i];
const float *obj_data = obj_data_vec[i];
const float *cls_data = cls_data_vec[i];
const float *ori_data = ori_data_vec[i];
const float *dim_data = dim_data_vec[i];
const float *anchor_data = yolo_blobs.anchor_blob->gpu_data()
+ num_anchor_per_scale * 2 * i;
const float *expand_data = yolo_blobs.expand_blob->gpu_data();
const int width = width_vec[i];
const int height = height_vec[i];
hipLaunchKernelGGL(( get_object_kernel) , dim3(block_size), dim3(thread_size), 0, stream ,
num_candidates_vec[i], loc_data, obj_data,
cls_data, ori_data, dim_data,
lof_data, lor_data, area_id_data,
visible_ratio_data, cut_off_ratio_data,
brvis_data, brswt_data, ltvis_data, ltswt_data,
rtvis_data, rtswt_data,
anchor_data,
yolo_blobs.expand_blob->gpu_data(),
width, height, num_anchor_per_scale,
num_classes, model_param.confidence_threshold(),
light_vis_conf_threshold, light_swt_conf_threshold,
model_param.with_box3d(), model_param.with_frbox(),
model_param.with_lights(), model_param.with_ratios(),
multi_scale,
model_param.num_areas(),
res_box_data + num_candidates_offset * kBoxBlockSize,
res_cls_data, num_candidates_offset,
all_scales_num_candidates);
hipStreamSynchronize(stream);
num_candidates_offset += num_candidates_vec[i];
}
const float *cpu_cls_data = yolo_blobs.res_cls_blob->cpu_data();
std::vector<int> all_indices(all_scales_num_candidates);
std::iota(all_indices.begin(), all_indices.end(), 0);
std::vector<int> rest_indices;
std::map<base::ObjectSubType, std::vector<int>> indices;
std::map<base::ObjectSubType, std::vector<float>> conf_scores;
int top_k = idx_sm->count();
int num_kept = 0;
// inter-cls NMS
apply_nms_gpu(res_box_data,
cpu_cls_data + num_classes * all_scales_num_candidates,
all_indices,
kBoxBlockSize,
nms.inter_cls_conf_thresh,
top_k,
nms.inter_cls_nms_thresh,
&rest_indices,
overlapped,
idx_sm,
stream);
for (int k = 0; k < num_classes; ++k) {
apply_nms_gpu(res_box_data,
cpu_cls_data + k * all_scales_num_candidates,
rest_indices,
kBoxBlockSize,
model_param.confidence_threshold(),
top_k,
nms.threshold,
&(indices[types[k]]),
overlapped,
idx_sm,
stream);
num_kept += indices[types[k]].size();
std::vector<float> conf_score(
cpu_cls_data + k * all_scales_num_candidates,
cpu_cls_data + (k + 1) * all_scales_num_candidates);
conf_scores.insert(std::make_pair(types[k], conf_score));
hipStreamSynchronize(stream);
}
objects->clear();
if (num_kept == 0) {
return;
}
objects->reserve(num_kept);
const float *cpu_box_data = yolo_blobs.res_box_blob->cpu_data();
ObjectMaintainer maintainer;
for (auto it = indices.begin(); it != indices.end(); ++it) {
base::ObjectSubType label = it->first;
if (conf_scores.find(label) == conf_scores.end()) {
// Something bad happened if there are no predictions for current label.
continue;
}
const std::vector<float> &scores = conf_scores.find(label)->second;
std::vector<int> &indice = it->second;
for (size_t j = 0; j < indice.size(); ++j) {
int idx = indice[j];
const float *bbox = cpu_box_data + idx * kBoxBlockSize;
if (scores[idx] < model_param.confidence_threshold()) {
continue;
}
base::ObjectPtr obj = nullptr;
obj.reset(new base::Object);
obj->type = base::kSubType2TypeMap.at(label);
obj->sub_type = label;
obj->type_probs.assign(
static_cast<int>(base::ObjectType::MAX_OBJECT_TYPE), 0);
obj->sub_type_probs.assign(
static_cast<int>(base::ObjectSubType::MAX_OBJECT_TYPE), 0);
float total = 1e-5;
for (int k = 0; k < num_classes; ++k) {
auto &vis_type_k = types[k];
auto &obj_type_k = base::kSubType2TypeMap.at(vis_type_k);
auto &conf_score = conf_scores[vis_type_k][idx];
obj->type_probs[static_cast<int>(obj_type_k)] += conf_score;
obj->sub_type_probs[static_cast<int>(vis_type_k)] =
conf_score;
total += conf_score;
}
obj->confidence = obj->type_probs[static_cast<int>(obj->type)];
for (int k = 0; k < obj->type_probs.size(); ++k) {
obj->type_probs[k] /= total;
}
fill_base(obj, bbox);
fill_bbox3d(model_param.with_box3d(), obj, bbox + 4);
fill_frbox(model_param.with_frbox(), obj, bbox + 8);
fill_lights(model_param.with_lights(), obj, bbox + 16);
fill_ratios(model_param.with_ratios(), obj, bbox + 22);
fill_area_id(model_param.num_areas() > 0, obj, bbox + 30);
if (maintainer.Add(idx, obj)) {
objects->push_back(obj);
}
}
}
}
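// Intersection rectangle of two normalized boxes; returns an all-zero box
// when they do not overlap.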
void get_intersect_bbox(const NormalizedBBox &bbox1,
const NormalizedBBox &bbox2,
NormalizedBBox *intersect_bbox) {
if (bbox2.xmin > bbox1.xmax || bbox2.xmax < bbox1.xmin ||
bbox2.ymin > bbox1.ymax || bbox2.ymax < bbox1.ymin) {
// Return [0, 0, 0, 0] if there is no intersection.
intersect_bbox->xmin = 0;
intersect_bbox->ymin = 0;
intersect_bbox->xmax = 0;
intersect_bbox->ymax = 0;
} else {
intersect_bbox->xmin = std::max(bbox1.xmin, bbox2.xmin);
intersect_bbox->ymin = std::max(bbox1.ymin, bbox2.ymin);
intersect_bbox->xmax = std::min(bbox1.xmax, bbox2.xmax);
intersect_bbox->ymax = std::min(bbox1.ymax, bbox2.ymax);
}
}
float get_bbox_size(const NormalizedBBox &bbox) {
if (bbox.xmax < bbox.xmin || bbox.ymax < bbox.ymin) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return 0;
} else {
if (bbox.size >= 0) {
return bbox.size;
} else {
float width = bbox.xmax - bbox.xmin;
float height = bbox.ymax - bbox.ymin;
return width * height;
}
}
}
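// Jaccard overlap (IoU): intersection area divided by the union of the two
// box areas.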
float get_jaccard_overlap(const NormalizedBBox &bbox1,
const NormalizedBBox &bbox2) {
NormalizedBBox intersect_bbox;
get_intersect_bbox(bbox1, bbox2, &intersect_bbox);
float intersect_width = 0.f;
float intersect_height = 0.f;
intersect_width = intersect_bbox.xmax - intersect_bbox.xmin;
intersect_height = intersect_bbox.ymax - intersect_bbox.ymin;
if (intersect_width > 0 && intersect_height > 0) {
float intersect_size = intersect_width * intersect_height;
float bbox1_size = get_bbox_size(bbox1);
float bbox2_size = get_bbox_size(bbox2);
return intersect_size / (bbox1_size + bbox2_size - intersect_size);
} else {
return 0.;
}
}
void get_max_score_index(const std::vector<float> &scores,
const float threshold,
const int top_k,
std::vector<std::pair<float, int> > *score_index_vec) {
// Generate index score pairs.
for (size_t i = 0; i < scores.size(); ++i) {
if (scores[i] > threshold) {
score_index_vec->push_back(std::make_pair(scores[i], i));
}
}
// Sort the score pair according to the scores in descending order
std::stable_sort(score_index_vec->begin(), score_index_vec->end(),
sort_score_pair_descend<int>);
// Keep top_k scores if needed.
if (top_k > -1 && top_k < static_cast<int>(score_index_vec->size())) {
score_index_vec->resize(top_k);
}
}
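// Soft-NMS: instead of discarding boxes that overlap the current best box,
// their scores are decayed -- linearly by (1 - IoU) or with a Gaussian
// penalty exp(-IoU^2 / sigma) -- so heavily overlapped boxes fade out.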
void apply_softnms_fast(const std::vector<NormalizedBBox> &bboxes,
std::vector<float> *scores,
const float score_threshold,
const float nms_threshold,
const int top_k,
std::vector<int> *indices,
bool is_linear,
const float sigma) {
// Sanity check.
CHECK_EQ(bboxes.size(), scores->size())
<< "bboxes and scores have different size.";
// Get top_k scores (with corresponding indices).
std::vector<std::pair<float, int> > score_index_vec;
get_max_score_index(*scores, score_threshold, top_k, &score_index_vec);
// Do nms.
indices->clear();
while (score_index_vec.size() != 0) {
auto best_it =
max_element(std::begin(score_index_vec), std::end(score_index_vec));
const int best_idx = (*best_it).second;
score_index_vec.erase(best_it);
const NormalizedBBox &best_bbox = bboxes[best_idx];
indices->push_back(best_idx);
for (std::vector<std::pair<float, int> >::iterator
it = score_index_vec.begin();
it != score_index_vec.end();) {
int cur_idx = it->second;
const NormalizedBBox &cur_bbox = bboxes[cur_idx];
float cur_overlap = 0.;
cur_overlap = get_jaccard_overlap(best_bbox, cur_bbox);
if (is_linear) {
(*scores)[cur_idx] *= (1.0 - cur_overlap);
} else {
(*scores)[cur_idx] *= exp(-1.0 * pow(cur_overlap, 2) / sigma);
}
++it;
}
}
}
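// Box voting: repeatedly picks the highest-scoring unmasked box and replaces
// its coordinates with a score-weighted average of the boxes whose overlap
// exceeds nms_threshold, while decaying (or masking, when sigma == 0) the
// scores of the overlapping boxes.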
void apply_boxvoting_fast(std::vector<NormalizedBBox> *bboxes,
std::vector<float> *scores,
const float conf_threshold,
const float nms_threshold,
const float sigma,
std::vector<int> *indices) {
if (bboxes->size() == 0) {
return;
}
indices->clear();
for (size_t i = 0; i < bboxes->size(); ++i) {
(*bboxes)[i].mask = false;
if ((*scores)[i] > conf_threshold) {
indices->push_back(i);
}
}
for (size_t count = 0; count < indices->size(); ++count) {
int max_box_idx = 0;
for (size_t i = 1; i < indices->size(); ++i) {
int idx = indices->at(i);
if ((*bboxes)[idx].mask) {
continue;
}
if ((*scores)[idx] > (*scores)[max_box_idx]) {
max_box_idx = idx;
}
}
NormalizedBBox &best_bbox = (*bboxes)[max_box_idx];
best_bbox.score = (*scores)[max_box_idx];
best_bbox.mask = true;
float s_vt = (*scores)[max_box_idx];
float x1_vt = best_bbox.xmin * s_vt;
float x2_vt = best_bbox.xmax * s_vt;
float y1_vt = best_bbox.ymin * s_vt;
float y2_vt = best_bbox.ymax * s_vt;
for (size_t i = 0; i < indices->size(); ++i) {
int sub_it = indices->at(i);
if ((*bboxes)[sub_it].mask) {
continue;
}
float cur_overlap = 0.;
cur_overlap = get_jaccard_overlap(best_bbox, (*bboxes)[sub_it]);
if (sigma == 0) {
(*bboxes)[sub_it].mask = true;
} else {
(*scores)[sub_it] *= exp(-1.0 * pow(cur_overlap, 2) / sigma);
}
(*bboxes)[sub_it].score = (*scores)[sub_it];
// Remove it if necessary
if (cur_overlap > nms_threshold) {
float s_vt_cur = (*bboxes)[sub_it].score;
s_vt += s_vt_cur;
x1_vt += (*bboxes)[sub_it].xmin * s_vt_cur;
x2_vt += (*bboxes)[sub_it].xmax * s_vt_cur;
y1_vt += (*bboxes)[sub_it].ymin * s_vt_cur;
y2_vt += (*bboxes)[sub_it].ymax * s_vt_cur;
}
}
if (s_vt > 0.0001) {
(*bboxes)[max_box_idx].xmin = x1_vt / s_vt;
(*bboxes)[max_box_idx].xmax = x2_vt / s_vt;
(*bboxes)[max_box_idx].ymin = y1_vt / s_vt;
(*bboxes)[max_box_idx].ymax = y2_vt / s_vt;
}
}
}
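// Classic greedy NMS with an adaptive threshold: a candidate is kept only if
// its IoU with every already-kept box stays at or below the threshold, and
// (when eta < 1) the threshold is tightened by eta after each kept box while
// it is still above 0.5.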
void apply_nms_fast(const std::vector<NormalizedBBox> &bboxes,
const std::vector<float> &scores,
const float score_threshold,
const float nms_threshold,
const float eta,
const int top_k,
std::vector<int> *indices) {
// Sanity check.
CHECK_EQ(bboxes.size(), scores.size())
<< "bboxes and scores have different size.";
// Get top_k scores (with corresponding indices).
std::vector<std::pair<float, int> > score_index_vec;
get_max_score_index(scores, score_threshold, top_k, &score_index_vec);
// Do nms.
float adaptive_threshold = nms_threshold;
indices->clear();
while (score_index_vec.size() != 0) {
const int idx = score_index_vec.front().second;
bool keep = true;
for (size_t k = 0; k < indices->size(); ++k) {
if (keep) {
const int kept_idx = (*indices)[k];
float overlap = get_jaccard_overlap(bboxes[idx], bboxes[kept_idx]);
keep = overlap <= adaptive_threshold;
} else {
break;
}
}
if (keep) {
indices->push_back(idx);
}
score_index_vec.erase(score_index_vec.begin());
if (keep && eta < 1 && adaptive_threshold > 0.5) {
adaptive_threshold *= eta;
}
}
}
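// Removes objects whose 2D box height or enabled 3D dimensions fall below
// the given minimums, compacting the vector in place.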
void filter_bbox(const MinDims &min_dims,
std::vector<base::ObjectPtr> *objects) {
size_t valid_obj_idx = 0;
size_t total_obj_idx = 0;
while (total_obj_idx < objects->size()) {
const auto &obj = (*objects)[total_obj_idx];
if ((obj->camera_supplement.box.ymax
- obj->camera_supplement.box.ymin) >= min_dims.min_2d_height &&
(min_dims.min_3d_height <= 0 || obj->size[2] >= min_dims.min_3d_height)
&&
(min_dims.min_3d_width <= 0
|| obj->size[1] >= min_dims.min_3d_width) &&
(min_dims.min_3d_length <= 0
|| obj->size[0] >= min_dims.min_3d_length)) {
(*objects)[valid_obj_idx] =
(*objects)[total_obj_idx];
++valid_obj_idx;
}
++total_obj_idx;
}
objects->resize(valid_obj_idx);
}
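// Maps normalized box coordinates back to image pixels for the given ROI
// (width, height, vertical offset), clips them to the image, flags boxes that
// touch the ROI border as truncated, and rescales the front/back boxes.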
void recover_bbox(int roi_w, int roi_h, int offset_y,
std::vector<base::ObjectPtr> *objects) {
for (auto &obj : *objects) {
float xmin = obj->camera_supplement.box.xmin;
float ymin = obj->camera_supplement.box.ymin;
float xmax = obj->camera_supplement.box.xmax;
float ymax = obj->camera_supplement.box.ymax;
int x = xmin * roi_w;
int w = (xmax - xmin) * roi_w;
int y = ymin * roi_h + offset_y;
int h = (ymax - ymin) * roi_h;
base::RectF rect_det(x, y, w, h);
base::RectF rect_img(0, 0, roi_w, roi_h + offset_y);
base::RectF rect = rect_det & rect_img;
obj->camera_supplement.box = rect;
double eps = 1e-2;
// Truncation assignment based on bbox positions
if ((ymin < eps) || (ymax >= 1.0 - eps)) {
obj->camera_supplement.truncated_vertical = 0.5;
} else {
obj->camera_supplement.truncated_vertical = 0.0;
}
if ((xmin < eps) || (xmax >= 1.0 - eps)) {
obj->camera_supplement.truncated_horizontal = 0.5;
} else {
obj->camera_supplement.truncated_horizontal = 0.0;
}
obj->camera_supplement.front_box.xmin *= roi_w;
obj->camera_supplement.front_box.ymin *= roi_h;
obj->camera_supplement.front_box.xmax *= roi_w;
obj->camera_supplement.front_box.ymax *= roi_h;
obj->camera_supplement.back_box.xmin *= roi_w;
obj->camera_supplement.back_box.ymin *= roi_h;
obj->camera_supplement.back_box.xmax *= roi_w;
obj->camera_supplement.back_box.ymax *= roi_h;
obj->camera_supplement.front_box.ymin += offset_y;
obj->camera_supplement.front_box.ymax += offset_y;
obj->camera_supplement.back_box.ymin += offset_y;
obj->camera_supplement.back_box.ymax += offset_y;
}
}
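// The fill_* helpers below copy consecutive slices of one kBoxBlockSize
// candidate record (written by get_object_kernel) into the output object:
// 2D box, 3D alpha/size, front/back boxes, car lights, visible and cut-off
// ratios, and area id.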
void fill_base(base::ObjectPtr obj, const float *bbox) {
obj->camera_supplement.box.xmin = bbox[0];
obj->camera_supplement.box.ymin = bbox[1];
obj->camera_supplement.box.xmax = bbox[2];
obj->camera_supplement.box.ymax = bbox[3];
}
void fill_bbox3d(bool with_box3d, base::ObjectPtr obj, const float *bbox) {
if (with_box3d) {
obj->camera_supplement.alpha = bbox[0];
obj->size[2] = bbox[1];
obj->size[1] = bbox[2];
obj->size[0] = bbox[3];
}
}
void fill_frbox(bool with_frbox, base::ObjectPtr obj, const float *bbox) {
if (with_frbox) {
obj->camera_supplement.front_box.xmin = bbox[0];
obj->camera_supplement.front_box.ymin = bbox[1];
obj->camera_supplement.front_box.xmax = bbox[2];
obj->camera_supplement.front_box.ymax = bbox[3];
obj->camera_supplement.back_box.xmin = bbox[4];
obj->camera_supplement.back_box.ymin = bbox[5];
obj->camera_supplement.back_box.xmax = bbox[6];
obj->camera_supplement.back_box.ymax = bbox[7];
}
}
void fill_lights(bool with_lights, base::ObjectPtr obj, const float *bbox) {
if (with_lights) {
obj->car_light.brake_visible = bbox[0];
obj->car_light.brake_switch_on = bbox[1];
obj->car_light.left_turn_visible = bbox[2];
obj->car_light.left_turn_switch_on = bbox[3];
obj->car_light.right_turn_visible = bbox[4];
obj->car_light.right_turn_switch_on = bbox[5];
}
}
void fill_ratios(bool with_ratios, base::ObjectPtr obj, const float *bbox) {
if (with_ratios) {
// visible ratios of face a/b/c/d
obj->camera_supplement.visible_ratios[0] = bbox[0];
obj->camera_supplement.visible_ratios[1] = bbox[1];
obj->camera_supplement.visible_ratios[2] = bbox[2];
obj->camera_supplement.visible_ratios[3] = bbox[3];
// cut off on width and length (3D)
obj->camera_supplement.cut_off_ratios[0] = bbox[4];
obj->camera_supplement.cut_off_ratios[1] = bbox[5];
// cut off on left and right side (2D)
obj->camera_supplement.cut_off_ratios[2] = bbox[6];
obj->camera_supplement.cut_off_ratios[3] = bbox[7];
}
}
void fill_area_id(bool with_flag, base::ObjectPtr obj, const float *data) {
if (with_flag) {
obj->camera_supplement.area_id = static_cast<int>(data[0]);
// obj->camera_supplement.area_id_prob = data[1];
}
}
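// Derives a discrete area id (1-8) from the four visible-face ratios: odd ids
// mean a single dominant face, even ids mean the dominant face plus its left
// or right neighbour; the remaining ratios are zeroed and the chosen ones
// renormalized.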
int get_area_id(float visible_ratios[4]) {
int area_id = 0;
int max_face = 0;
for (int i = 1; i < 4; ++i) {
if (visible_ratios[i] > visible_ratios[max_face]) {
max_face = i;
}
}
int left_face = (max_face + 1) % 4;
int right_face = (max_face + 3) % 4;
const float eps = 1e-3;
float max_ratio = visible_ratios[max_face];
float left_ratio = visible_ratios[left_face];
float right_ratio = visible_ratios[right_face];
memset(visible_ratios, 0, 4 * sizeof(visible_ratios[0]));
if (left_ratio < eps && right_ratio < eps) {
area_id = (max_face * 2 + 1);
visible_ratios[max_face] = 1.f;
} else if (left_ratio > right_ratio) {
area_id = (max_face * 2 + 2);
auto &&sum_ratio = left_ratio + max_ratio;
visible_ratios[max_face] = max_ratio / sum_ratio;
visible_ratios[left_face] = left_ratio / sum_ratio;
} else {
area_id = (max_face * 2);
if (area_id == 0) {
area_id = 8;
}
auto &&sum_ratio = right_ratio + max_ratio;
visible_ratios[max_face] = max_ratio / sum_ratio;
visible_ratios[right_face] = right_ratio / sum_ratio;
}
return area_id;
}
} // namespace camera
} // namespace perception
} // namespace apollo
| 8e36e286027f2388a4a06292d5bd3fc184442c3f.cu | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <memory>
#include <utility>
#include <vector>
#include <map>
#include <functional>
#include <algorithm>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "boost/iterator/counting_iterator.hpp"
#include "modules/perception/camera/lib/obstacle/detector/yolo/region_output.h"
#include "modules/perception/base/object_types.h"
#include "modules/perception/camera/lib/obstacle/detector/yolo/object_maintainer.h"
namespace apollo {
namespace perception {
namespace camera {
__host__ __device__
float sigmoid_gpu(float x) {
return 1.0 / (1.0 + exp(-x));
}
__host__ __device__
float bbox_size_gpu(const float *bbox,
const bool normalized) {
if (bbox[2] <= bbox[0] || bbox[3] <= bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return 0.f; // NOLINT
} else {
const float width = bbox[2] - bbox[0];
const float height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
__host__ __device__
float jaccard_overlap_gpu(const float *bbox1,
const float *bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return float(0.); // NOLINT
} else {
const float inter_xmin = max(bbox1[0], bbox2[0]);
const float inter_ymin = max(bbox1[1], bbox2[1]);
const float inter_xmax = min(bbox1[2], bbox2[2]);
const float inter_ymax = min(bbox1[3], bbox2[3]);
const float inter_width = inter_xmax - inter_xmin;
const float inter_height = inter_ymax - inter_ymin;
const float inter_size = inter_width * inter_height;
const float bbox1_size = bbox_size_gpu(bbox1, true);
const float bbox2_size = bbox_size_gpu(bbox2, true);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
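// One thread per candidate (anchor x cell): applies the YOLO sigmoid/exp box
// decoding against the anchor sizes, writes per-class scores scaled by the
// objectness into res_cls_data, and packs the decoded box plus the optional
// 3D, front/back-box, light, ratio and area-id outputs into a kBoxBlockSize
// record of res_box_data.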
__global__ void get_object_kernel(int n,
const float *loc_data,
const float *obj_data,
const float *cls_data,
const float *ori_data,
const float *dim_data,
const float *lof_data,
const float *lor_data,
const float *area_id_data,
const float *visible_ratio_data,
const float *cut_off_ratio_data,
const float *brvis_data,
const float *brswt_data,
const float *ltvis_data,
const float *ltswt_data,
const float *rtvis_data,
const float *rtswt_data,
const float *anchor_data,
const float *expand_data,
int width,
int height,
int num_anchors,
int num_classes,
float confidence_threshold,
float light_vis_conf_threshold,
float light_swt_conf_threshold,
bool with_box3d,
bool with_frbox,
bool with_lights,
bool with_ratios,
bool multi_scale,
int num_areas,
float *res_box_data,
float *res_cls_data,
int res_cls_offset,
int all_scales_num_candidates) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);
i += blockDim.x * gridDim.x) {
int box_block = kBoxBlockSize;
int idx = i;
int c = idx % num_anchors;
idx = idx / num_anchors;
int w = idx % width;
idx = idx / width;
int h = idx;
int loc_index = (h * width + w) * num_anchors + c;
int offset_loc = loc_index * 4;
int offset_cls = loc_index * num_classes;
float scale = obj_data[loc_index];
float cx = (w + sigmoid_gpu(loc_data[offset_loc + 0])) / width;
float cy = (h + sigmoid_gpu(loc_data[offset_loc + 1])) / height;
float hw =
exp(max(minExpPower, min(loc_data[offset_loc + 2], maxExpPower))) *
anchor_data[2 * c] / width * 0.5;
float hh =
exp(max(minExpPower, min(loc_data[offset_loc + 3], maxExpPower))) *
anchor_data[2 * c + 1] / height * 0.5;
float max_prob = 0.f;
int max_index = 0;
for (int k = 0; k < num_classes; ++k) {
float prob = cls_data[offset_cls + k] * scale;
res_cls_data[k * all_scales_num_candidates
+ res_cls_offset + i] = prob;
if (prob > max_prob) {
max_prob = prob;
max_index = k;
}
}
res_cls_data[num_classes * all_scales_num_candidates
+ res_cls_offset + i] = max_prob;
auto &&dst_ptr = res_box_data + i * box_block;
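// Layout of one kBoxBlockSize record (consumed by the fill_* helpers):
// [0..3] 2D box, [4] alpha, [5..7] 3D dimensions, [8..15] front/back boxes,
// [16..21] light scores, [22..29] visible/cut-off ratios, [30..31] area id.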
hw += expand_data[max_index];
dst_ptr[0] = cx - hw;
dst_ptr[1] = cy - hh;
dst_ptr[2] = cx + hw;
dst_ptr[3] = cy + hh;
if (with_box3d) {
int offset_ori = loc_index * 2;
dst_ptr[4] = atan2(ori_data[offset_ori + 1], ori_data[offset_ori]);
int offset_dim = loc_index * 3;
if (multi_scale){
offset_dim = loc_index * num_classes * 3 + max_index * 3;
}
dst_ptr[5] = dim_data[offset_dim + 0];
dst_ptr[6] = dim_data[offset_dim + 1];
dst_ptr[7] = dim_data[offset_dim + 2];
}
if (with_frbox) {
{
int offset_lof = loc_index * 4;
auto &&src_ptr = lof_data + offset_lof;
auto sb_x = src_ptr[0] * hw * 2 + cx;
auto sb_y = src_ptr[1] * hh * 2 + cy;
auto sb_hw = exp(src_ptr[2]) * hw;
auto sb_hh = exp(src_ptr[3]) * hh;
dst_ptr[8] = sb_x - sb_hw;
dst_ptr[9] = sb_y - sb_hh;
dst_ptr[10] = sb_x + sb_hw;
dst_ptr[11] = sb_y + sb_hh;
}
{
int offset_lor = loc_index * 4;
auto &&src_ptr = lor_data + offset_lor;
auto sb_x = src_ptr[0] * hw * 2 + cx;
auto sb_y = src_ptr[1] * hh * 2 + cy;
auto sb_hw = exp(src_ptr[2]) * hw;
auto sb_hh = exp(src_ptr[3]) * hh;
dst_ptr[12] = sb_x - sb_hw;
dst_ptr[13] = sb_y - sb_hh;
dst_ptr[14] = sb_x + sb_hw;
dst_ptr[15] = sb_y + sb_hh;
}
}
if (with_lights) {
dst_ptr[16] = sigmoid_gpu(brvis_data[loc_index]);
dst_ptr[17] = sigmoid_gpu(brswt_data[loc_index]);
dst_ptr[18] = sigmoid_gpu(ltvis_data[loc_index]);
dst_ptr[19] = sigmoid_gpu(ltswt_data[loc_index]);
dst_ptr[20] = sigmoid_gpu(rtvis_data[loc_index]);
dst_ptr[21] = sigmoid_gpu(rtswt_data[loc_index]);
dst_ptr[16] = dst_ptr[16] > light_vis_conf_threshold ? dst_ptr[16] : 0;
dst_ptr[18] = dst_ptr[18] > light_vis_conf_threshold ? dst_ptr[18] : 0;
dst_ptr[20] = dst_ptr[20] > light_vis_conf_threshold ? dst_ptr[20] : 0;
float swt_score = 0;
swt_score = dst_ptr[16] * dst_ptr[17];
dst_ptr[17] = swt_score > light_swt_conf_threshold ? swt_score : 0;
swt_score = dst_ptr[18] * dst_ptr[19];
dst_ptr[19] = swt_score > light_swt_conf_threshold ? swt_score : 0;
swt_score = dst_ptr[20] * dst_ptr[21];
dst_ptr[21] = swt_score > light_swt_conf_threshold ? swt_score : 0;
}
if (with_ratios) {
// 0~3: cos2, left, visa, visb
auto vis_pred = visible_ratio_data + loc_index * 4;
auto vis_ptr = dst_ptr + 22;
vis_ptr[0] = vis_ptr[1] = vis_ptr[2] = vis_ptr[3] = 0;
const float hi_th = 0.75;
const float lo_th = 1.f - hi_th;
if (vis_pred[2] >= hi_th && vis_pred[3] >= hi_th) { // 2 (1, 3)
vis_ptr[0] = vis_pred[0];
vis_ptr[1] = 1 - vis_pred[0];
} else if (vis_pred[2] <= lo_th && vis_pred[3] >= hi_th) { // 4 (3, 5)
vis_ptr[2] = vis_pred[0];
vis_ptr[1] = 1 - vis_pred[0];
} else if (vis_pred[2] <= lo_th && vis_pred[3] <= lo_th) { // 6 (5, 7)
vis_ptr[2] = vis_pred[0];
vis_ptr[3] = 1 - vis_pred[0];
} else if (vis_pred[2] >= hi_th && vis_pred[3] <= lo_th) { // 8 (7, 1)
vis_ptr[0] = vis_pred[0];
vis_ptr[3] = 1 - vis_pred[0];
} else {
vis_ptr[2] = vis_pred[0];
if (vis_pred[1] > 0.5) {
vis_ptr[1] = 1 - vis_pred[0];
} else {
vis_ptr[3] = 1 - vis_pred[0];
}
}
int offset_cut = loc_index * 4;
dst_ptr[26] = cut_off_ratio_data[offset_cut + 0];
dst_ptr[27] = cut_off_ratio_data[offset_cut + 1];
dst_ptr[28] = cut_off_ratio_data[offset_cut + 2];
dst_ptr[29] = cut_off_ratio_data[offset_cut + 3];
}
if (num_areas > 0) {
int offset_area_id = loc_index * num_areas;
int max_area_id = 0;
for (int area_id = 1; area_id < num_areas; ++area_id) {
if (area_id_data[offset_area_id + area_id] >
area_id_data[offset_area_id + max_area_id]) {
max_area_id = area_id;
}
}
dst_ptr[30] = max_area_id + 1;
dst_ptr[31] = area_id_data[offset_area_id + max_area_id];
}
}
}
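// One thread per anchor box: decodes the box centre and size from loc_data
// and the anchor table, and keeps the objectness score only when it exceeds
// the confidence threshold (otherwise writes 0).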
__global__ void get_rois_kernel(int num_bboxes,
const float *loc_data,
const float *obj_data,
const float *anchor_data,
int width,
int height,
int num_anchors,
float confidence_threshold,
float *conf_data,
float *bbox_data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_bboxes) {
int offset_obj = idx;
int offset_loc = idx * 4;
int c = idx % num_anchors;
idx /= num_anchors;
int w = idx % width;
idx /= width;
int h = idx;
float cx = (w + sigmoid_gpu(loc_data[offset_loc + 0])) / width;
float cy = (h + sigmoid_gpu(loc_data[offset_loc + 1])) / height;
float hw =
exp(loc_data[offset_loc + 2]) * anchor_data[2 * c + 0] / width * 0.5;
float hh =
exp(loc_data[offset_loc + 3]) * anchor_data[2 * c + 1] / height * 0.5;
const float &conf = obj_data[offset_obj];
conf_data[offset_obj] = conf > confidence_threshold ? conf : 0;
auto &&curr_bbox = bbox_data + offset_loc;
curr_bbox[0] = cx - hw;
curr_bbox[1] = cy - hh;
curr_bbox[2] = cx + hw;
curr_bbox[3] = cy + hh;
}
}
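// One thread per (i, j) pair of kept candidates: sets overlapped_data when
// the Jaccard overlap of the two boxes exceeds overlap_threshold.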
__global__ void compute_overlapped_by_idx_kernel(
const int nthreads,
const float *bbox_data,
const int bbox_step,
const float overlap_threshold,
const int *idx,
const int num_idx,
bool *overlapped_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < (nthreads); index += blockDim.x * gridDim.x) {
const int j = index % num_idx;
const int i = index / num_idx;
if (i == j) {
// Ignore same bbox; use continue so the grid-stride loop still
// processes any further indices assigned to this thread.
continue;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * bbox_step;
const int start_loc_j = idx[j] * bbox_step;
const float overlap = jaccard_overlap_gpu(bbox_data + start_loc_i,
bbox_data + start_loc_j);
overlapped_data[index] = overlap > overlap_threshold;
}
}
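// Host wrapper: launches compute_overlapped_by_idx_kernel with 512 threads
// per block on the caller's stream to fill the num_idx x num_idx overlap
// matrix consumed by apply_nms_gpu / apply_nms.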
void compute_overlapped_by_idx_gpu(const int nthreads,
const float *bbox_data,
const int bbox_step,
const float overlap_threshold,
const int *idx,
const int num_idx,
bool *overlapped_data,
const cudaStream_t &stream) {
// NOLINT_NEXT_LINE(whitespace/operators)
const int thread_size = 512;
int block_size = (nthreads + thread_size - 1) / thread_size;
compute_overlapped_by_idx_kernel<<<block_size, thread_size, 0, stream>>>(
nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx,
overlapped_data);
}
void apply_nms_gpu(const float *bbox_data,
const float *conf_data,
const std::vector<int> &origin_indices,
const int bbox_step,
const float confidence_threshold,
const int top_k,
const float nms_threshold,
std::vector<int> *indices,
base::Blob<bool> *overlapped,
base::Blob<int> *idx_sm,
const cudaStream_t &stream) {
// Keep part of detections whose scores are higher than confidence threshold.
std::vector<int> idx;
std::vector<float> confidences;
for (auto i : origin_indices) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<float>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
int *idx_data = (idx_sm->mutable_cpu_data());
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
overlapped->Reshape(std::vector<int>{num_remain, num_remain});
bool *overlapped_data = (overlapped->mutable_gpu_data());
compute_overlapped_by_idx_gpu(overlapped->count(),
bbox_data,
bbox_step,
nms_threshold,
idx_sm->gpu_data(),
num_remain,
overlapped_data,
stream);
// Do non-maximum suppression based on overlapped results.
const bool *overlapped_results = (const bool *) overlapped->cpu_data();
std::vector<int> selected_indices;
apply_nms(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (size_t i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
void apply_nms(const bool *overlapped,
const int num,
std::vector<int> *indices) {
std::vector<int> index_vec(boost::counting_iterator<int>(0),
boost::counting_iterator<int>(num));
// Do nms.
indices->clear();
while (index_vec.size() != 0) {
// Get the current highest score box.
int best_idx = index_vec.front();
indices->push_back(best_idx);
// Erase the best box.
index_vec.erase(index_vec.begin());
for (std::vector<int>::iterator it = index_vec.begin();
it != index_vec.end();) {
int cur_idx = *it;
// Remove it if necessary
if (overlapped[best_idx * num + cur_idx]) {
it = index_vec.erase(it);
} else {
++it;
}
}
}
}
const float *get_gpu_data(bool flag, const base::Blob<float> &blob) {
return flag ? blob.gpu_data() : nullptr;
}
void get_objects_gpu(const YoloBlobs &yolo_blobs,
const cudaStream_t &stream,
const std::vector<base::ObjectSubType> &types,
const NMSParam &nms,
const yolo::ModelParam &model_param,
float light_vis_conf_threshold,
float light_swt_conf_threshold,
base::Blob<bool> *overlapped,
base::Blob<int> *idx_sm,
std::vector<base::ObjectPtr> *objects) {
bool multi_scale = false;
if (yolo_blobs.det2_obj_blob){
multi_scale = true;
}
int num_classes = types.size();
int batch = yolo_blobs.det1_obj_blob->shape(0);
int num_anchor = yolo_blobs.anchor_blob->shape(2);
int num_anchor_per_scale = num_anchor;
if (multi_scale){
num_anchor_per_scale /= numScales;
}
CHECK_EQ(batch, 1) << "batch size should be 1!";
std::vector<int> height_vec, width_vec, num_candidates_vec;
height_vec.push_back(yolo_blobs.det1_obj_blob->shape(1));
width_vec.push_back(yolo_blobs.det1_obj_blob->shape(2));
if (multi_scale){
height_vec.push_back(yolo_blobs.det2_obj_blob->shape(1));
height_vec.push_back(yolo_blobs.det3_obj_blob->shape(1));
width_vec.push_back(yolo_blobs.det2_obj_blob->shape(2));
width_vec.push_back(yolo_blobs.det3_obj_blob->shape(2));
}
for (size_t i=0; i<height_vec.size(); i++){
num_candidates_vec.push_back(
height_vec[i] * width_vec[i] * num_anchor_per_scale);
}
const float* loc_data_vec[3] = {yolo_blobs.det1_loc_blob->gpu_data(),
yolo_blobs.det2_loc_blob? yolo_blobs.det2_loc_blob->gpu_data() : nullptr,
yolo_blobs.det3_loc_blob? yolo_blobs.det3_loc_blob->gpu_data() : nullptr};
const float* obj_data_vec[3] = {yolo_blobs.det1_obj_blob->gpu_data(),
yolo_blobs.det2_obj_blob? yolo_blobs.det2_obj_blob->gpu_data() : nullptr,
yolo_blobs.det3_obj_blob? yolo_blobs.det3_obj_blob->gpu_data() : nullptr};
const float* cls_data_vec[3] = {yolo_blobs.det1_cls_blob->gpu_data(),
yolo_blobs.det2_cls_blob? yolo_blobs.det2_cls_blob->gpu_data() : nullptr,
yolo_blobs.det3_cls_blob? yolo_blobs.det3_cls_blob->gpu_data() : nullptr};
const float* ori_data_vec[3] = {get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det1_ori_blob),
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det2_ori_blob) : nullptr,
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det3_ori_blob) : nullptr};
const float* dim_data_vec[3] = {get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det1_dim_blob),
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det2_dim_blob) : nullptr,
multi_scale? get_gpu_data(model_param.with_box3d(),
*yolo_blobs.det3_dim_blob) : nullptr};
//TODO[KaWai]: add 3 scale frbox data and light data.
const float *lof_data = get_gpu_data(
model_param.with_frbox(), *yolo_blobs.lof_blob);
const float *lor_data = get_gpu_data(
model_param.with_frbox(), *yolo_blobs.lor_blob);
const float *area_id_data = get_gpu_data(
model_param.num_areas() > 0, *yolo_blobs.area_id_blob);
const float *visible_ratio_data = get_gpu_data(
model_param.with_ratios(), *yolo_blobs.visible_ratio_blob);
const float *cut_off_ratio_data = get_gpu_data(
model_param.with_ratios(), *yolo_blobs.cut_off_ratio_blob);
const auto &with_lights = model_param.with_lights();
const float *brvis_data = get_gpu_data(with_lights, *yolo_blobs.brvis_blob);
const float *brswt_data = get_gpu_data(with_lights, *yolo_blobs.brswt_blob);
const float *ltvis_data = get_gpu_data(with_lights, *yolo_blobs.ltvis_blob);
const float *ltswt_data = get_gpu_data(with_lights, *yolo_blobs.ltswt_blob);
const float *rtvis_data = get_gpu_data(with_lights, *yolo_blobs.rtvis_blob);
const float *rtswt_data = get_gpu_data(with_lights, *yolo_blobs.rtswt_blob);
int all_scales_num_candidates = 0;
for (size_t i = 0; i < num_candidates_vec.size(); i++){
all_scales_num_candidates += num_candidates_vec[i];
}
yolo_blobs.res_box_blob->Reshape(
std::vector<int>{1, 1, all_scales_num_candidates, kBoxBlockSize});
yolo_blobs.res_cls_blob->Reshape(
std::vector<int>{1, 1, num_classes + 1, all_scales_num_candidates});
float *res_box_data = yolo_blobs.res_box_blob->mutable_gpu_data();
float *res_cls_data = yolo_blobs.res_cls_blob->mutable_gpu_data();
const int thread_size = 512;
//TODO[KaWai]: use different stream to process scales in parallel.
int num_candidates_offset = 0;
for (int i = 0; i < num_candidates_vec.size(); i++){
int block_size = (num_candidates_vec[i] + thread_size - 1) / thread_size;
const float *loc_data = loc_data_vec[i];
const float *obj_data = obj_data_vec[i];
const float *cls_data = cls_data_vec[i];
const float *ori_data = ori_data_vec[i];
const float *dim_data = dim_data_vec[i];
const float *anchor_data = yolo_blobs.anchor_blob->gpu_data()
+ num_anchor_per_scale * 2 * i;
const float *expand_data = yolo_blobs.expand_blob->gpu_data();
const int width = width_vec[i];
const int height = height_vec[i];
get_object_kernel <<< block_size, thread_size, 0, stream >>> (
num_candidates_vec[i], loc_data, obj_data,
cls_data, ori_data, dim_data,
lof_data, lor_data, area_id_data,
visible_ratio_data, cut_off_ratio_data,
brvis_data, brswt_data, ltvis_data, ltswt_data,
rtvis_data, rtswt_data,
anchor_data,
yolo_blobs.expand_blob->gpu_data(),
width, height, num_anchor_per_scale,
num_classes, model_param.confidence_threshold(),
light_vis_conf_threshold, light_swt_conf_threshold,
model_param.with_box3d(), model_param.with_frbox(),
model_param.with_lights(), model_param.with_ratios(),
multi_scale,
model_param.num_areas(),
res_box_data + num_candidates_offset * kBoxBlockSize,
res_cls_data, num_candidates_offset,
all_scales_num_candidates);
cudaStreamSynchronize(stream);
num_candidates_offset += num_candidates_vec[i];
}
const float *cpu_cls_data = yolo_blobs.res_cls_blob->cpu_data();
std::vector<int> all_indices(all_scales_num_candidates);
std::iota(all_indices.begin(), all_indices.end(), 0);
std::vector<int> rest_indices;
std::map<base::ObjectSubType, std::vector<int>> indices;
std::map<base::ObjectSubType, std::vector<float>> conf_scores;
int top_k = idx_sm->count();
int num_kept = 0;
// inter-cls NMS
apply_nms_gpu(res_box_data,
cpu_cls_data + num_classes * all_scales_num_candidates,
all_indices,
kBoxBlockSize,
nms.inter_cls_conf_thresh,
top_k,
nms.inter_cls_nms_thresh,
&rest_indices,
overlapped,
idx_sm,
stream);
for (int k = 0; k < num_classes; ++k) {
apply_nms_gpu(res_box_data,
cpu_cls_data + k * all_scales_num_candidates,
rest_indices,
kBoxBlockSize,
model_param.confidence_threshold(),
top_k,
nms.threshold,
&(indices[types[k]]),
overlapped,
idx_sm,
stream);
num_kept += indices[types[k]].size();
std::vector<float> conf_score(
cpu_cls_data + k * all_scales_num_candidates,
cpu_cls_data + (k + 1) * all_scales_num_candidates);
conf_scores.insert(std::make_pair(types[k], conf_score));
cudaStreamSynchronize(stream);
}
objects->clear();
if (num_kept == 0) {
return;
}
objects->reserve(num_kept);
const float *cpu_box_data = yolo_blobs.res_box_blob->cpu_data();
ObjectMaintainer maintainer;
for (auto it = indices.begin(); it != indices.end(); ++it) {
base::ObjectSubType label = it->first;
if (conf_scores.find(label) == conf_scores.end()) {
// Something bad happened if there are no predictions for current label.
continue;
}
const std::vector<float> &scores = conf_scores.find(label)->second;
std::vector<int> &indice = it->second;
for (size_t j = 0; j < indice.size(); ++j) {
int idx = indice[j];
const float *bbox = cpu_box_data + idx * kBoxBlockSize;
if (scores[idx] < model_param.confidence_threshold()) {
continue;
}
base::ObjectPtr obj = nullptr;
obj.reset(new base::Object);
obj->type = base::kSubType2TypeMap.at(label);
obj->sub_type = label;
obj->type_probs.assign(
static_cast<int>(base::ObjectType::MAX_OBJECT_TYPE), 0);
obj->sub_type_probs.assign(
static_cast<int>(base::ObjectSubType::MAX_OBJECT_TYPE), 0);
float total = 1e-5;
for (int k = 0; k < num_classes; ++k) {
auto &vis_type_k = types[k];
auto &obj_type_k = base::kSubType2TypeMap.at(vis_type_k);
auto &conf_score = conf_scores[vis_type_k][idx];
obj->type_probs[static_cast<int>(obj_type_k)] += conf_score;
obj->sub_type_probs[static_cast<int>(vis_type_k)] =
conf_score;
total += conf_score;
}
obj->confidence = obj->type_probs[static_cast<int>(obj->type)];
for (int k = 0; k < obj->type_probs.size(); ++k) {
obj->type_probs[k] /= total;
}
fill_base(obj, bbox);
fill_bbox3d(model_param.with_box3d(), obj, bbox + 4);
fill_frbox(model_param.with_frbox(), obj, bbox + 8);
fill_lights(model_param.with_lights(), obj, bbox + 16);
fill_ratios(model_param.with_ratios(), obj, bbox + 22);
fill_area_id(model_param.num_areas() > 0, obj, bbox + 30);
if (maintainer.Add(idx, obj)) {
objects->push_back(obj);
}
}
}
}
void get_intersect_bbox(const NormalizedBBox &bbox1,
const NormalizedBBox &bbox2,
NormalizedBBox *intersect_bbox) {
if (bbox2.xmin > bbox1.xmax || bbox2.xmax < bbox1.xmin ||
bbox2.ymin > bbox1.ymax || bbox2.ymax < bbox1.ymin) {
// Return [0, 0, 0, 0] if there is no intersection.
intersect_bbox->xmin = 0;
intersect_bbox->ymin = 0;
intersect_bbox->xmax = 0;
intersect_bbox->ymax = 0;
} else {
intersect_bbox->xmin = std::max(bbox1.xmin, bbox2.xmin);
intersect_bbox->ymin = std::max(bbox1.ymin, bbox2.ymin);
intersect_bbox->xmax = std::min(bbox1.xmax, bbox2.xmax);
intersect_bbox->ymax = std::min(bbox1.ymax, bbox2.ymax);
}
}
float get_bbox_size(const NormalizedBBox &bbox) {
if (bbox.xmax < bbox.xmin || bbox.ymax < bbox.ymin) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return 0;
} else {
if (bbox.size >= 0) {
return bbox.size;
} else {
float width = bbox.xmax - bbox.xmin;
float height = bbox.ymax - bbox.ymin;
return width * height;
}
}
}
float get_jaccard_overlap(const NormalizedBBox &bbox1,
const NormalizedBBox &bbox2) {
NormalizedBBox intersect_bbox;
get_intersect_bbox(bbox1, bbox2, &intersect_bbox);
float intersect_width = 0.f;
float intersect_height = 0.f;
intersect_width = intersect_bbox.xmax - intersect_bbox.xmin;
intersect_height = intersect_bbox.ymax - intersect_bbox.ymin;
if (intersect_width > 0 && intersect_height > 0) {
float intersect_size = intersect_width * intersect_height;
float bbox1_size = get_bbox_size(bbox1);
float bbox2_size = get_bbox_size(bbox2);
return intersect_size / (bbox1_size + bbox2_size - intersect_size);
} else {
return 0.;
}
}
void get_max_score_index(const std::vector<float> &scores,
const float threshold,
const int top_k,
std::vector<std::pair<float, int> > *score_index_vec) {
// Generate index score pairs.
for (size_t i = 0; i < scores.size(); ++i) {
if (scores[i] > threshold) {
score_index_vec->push_back(std::make_pair(scores[i], i));
}
}
// Sort the score pair according to the scores in descending order
std::stable_sort(score_index_vec->begin(), score_index_vec->end(),
sort_score_pair_descend<int>);
// Keep top_k scores if needed.
if (top_k > -1 && top_k < static_cast<int>(score_index_vec->size())) {
score_index_vec->resize(top_k);
}
}
void apply_softnms_fast(const std::vector<NormalizedBBox> &bboxes,
std::vector<float> *scores,
const float score_threshold,
const float nms_threshold,
const int top_k,
std::vector<int> *indices,
bool is_linear,
const float sigma) {
// Sanity check.
CHECK_EQ(bboxes.size(), scores->size())
<< "bboxes and scores have different size.";
// Get top_k scores (with corresponding indices).
std::vector<std::pair<float, int> > score_index_vec;
get_max_score_index(*scores, score_threshold, top_k, &score_index_vec);
// Do nms.
indices->clear();
while (score_index_vec.size() != 0) {
auto best_it =
max_element(std::begin(score_index_vec), std::end(score_index_vec));
const int best_idx = (*best_it).second;
score_index_vec.erase(best_it);
const NormalizedBBox &best_bbox = bboxes[best_idx];
indices->push_back(best_idx);
for (std::vector<std::pair<float, int> >::iterator
it = score_index_vec.begin();
it != score_index_vec.end();) {
int cur_idx = it->second;
const NormalizedBBox &cur_bbox = bboxes[cur_idx];
float cur_overlap = 0.;
cur_overlap = get_jaccard_overlap(best_bbox, cur_bbox);
if (is_linear) {
(*scores)[cur_idx] *= (1.0 - cur_overlap);
} else {
(*scores)[cur_idx] *= exp(-1.0 * pow(cur_overlap, 2) / sigma);
}
++it;
}
}
}
void apply_boxvoting_fast(std::vector<NormalizedBBox> *bboxes,
std::vector<float> *scores,
const float conf_threshold,
const float nms_threshold,
const float sigma,
std::vector<int> *indices) {
if (bboxes->size() == 0) {
return;
}
indices->clear();
for (size_t i = 0; i < bboxes->size(); ++i) {
(*bboxes)[i].mask = false;
if ((*scores)[i] > conf_threshold) {
indices->push_back(i);
}
}
for (size_t count = 0; count < indices->size(); ++count) {
int max_box_idx = 0;
for (size_t i = 1; i < indices->size(); ++i) {
int idx = indices->at(i);
if ((*bboxes)[idx].mask) {
continue;
}
if ((*scores)[idx] > (*scores)[max_box_idx]) {
max_box_idx = idx;
}
}
NormalizedBBox &best_bbox = (*bboxes)[max_box_idx];
best_bbox.score = (*scores)[max_box_idx];
best_bbox.mask = true;
float s_vt = (*scores)[max_box_idx];
float x1_vt = best_bbox.xmin * s_vt;
float x2_vt = best_bbox.xmax * s_vt;
float y1_vt = best_bbox.ymin * s_vt;
float y2_vt = best_bbox.ymax * s_vt;
for (size_t i = 0; i < indices->size(); ++i) {
int sub_it = indices->at(i);
if ((*bboxes)[sub_it].mask) {
continue;
}
float cur_overlap = 0.;
cur_overlap = get_jaccard_overlap(best_bbox, (*bboxes)[sub_it]);
if (sigma == 0) {
(*bboxes)[sub_it].mask = true;
} else {
(*scores)[sub_it] *= exp(-1.0 * pow(cur_overlap, 2) / sigma);
}
(*bboxes)[sub_it].score = (*scores)[sub_it];
// Remove it if necessary
if (cur_overlap > nms_threshold) {
float s_vt_cur = (*bboxes)[sub_it].score;
s_vt += s_vt_cur;
x1_vt += (*bboxes)[sub_it].xmin * s_vt_cur;
x2_vt += (*bboxes)[sub_it].xmax * s_vt_cur;
y1_vt += (*bboxes)[sub_it].ymin * s_vt_cur;
y2_vt += (*bboxes)[sub_it].ymax * s_vt_cur;
}
}
if (s_vt > 0.0001) {
(*bboxes)[max_box_idx].xmin = x1_vt / s_vt;
(*bboxes)[max_box_idx].xmax = x2_vt / s_vt;
(*bboxes)[max_box_idx].ymin = y1_vt / s_vt;
(*bboxes)[max_box_idx].ymax = y2_vt / s_vt;
}
}
}
void apply_nms_fast(const std::vector<NormalizedBBox> &bboxes,
const std::vector<float> &scores,
const float score_threshold,
const float nms_threshold,
const float eta,
const int top_k,
std::vector<int> *indices) {
// Sanity check.
CHECK_EQ(bboxes.size(), scores.size())
<< "bboxes and scores have different size.";
// Get top_k scores (with corresponding indices).
std::vector<std::pair<float, int> > score_index_vec;
get_max_score_index(scores, score_threshold, top_k, &score_index_vec);
// Do nms.
float adaptive_threshold = nms_threshold;
indices->clear();
while (score_index_vec.size() != 0) {
const int idx = score_index_vec.front().second;
bool keep = true;
for (size_t k = 0; k < indices->size(); ++k) {
if (keep) {
const int kept_idx = (*indices)[k];
float overlap = get_jaccard_overlap(bboxes[idx], bboxes[kept_idx]);
keep = overlap <= adaptive_threshold;
} else {
break;
}
}
if (keep) {
indices->push_back(idx);
}
score_index_vec.erase(score_index_vec.begin());
if (keep && eta < 1 && adaptive_threshold > 0.5) {
adaptive_threshold *= eta;
}
}
}
void filter_bbox(const MinDims &min_dims,
std::vector<base::ObjectPtr> *objects) {
size_t valid_obj_idx = 0;
size_t total_obj_idx = 0;
while (total_obj_idx < objects->size()) {
const auto &obj = (*objects)[total_obj_idx];
if ((obj->camera_supplement.box.ymax
- obj->camera_supplement.box.ymin) >= min_dims.min_2d_height &&
(min_dims.min_3d_height <= 0 || obj->size[2] >= min_dims.min_3d_height)
&&
(min_dims.min_3d_width <= 0
|| obj->size[1] >= min_dims.min_3d_width) &&
(min_dims.min_3d_length <= 0
|| obj->size[0] >= min_dims.min_3d_length)) {
(*objects)[valid_obj_idx] =
(*objects)[total_obj_idx];
++valid_obj_idx;
}
++total_obj_idx;
}
objects->resize(valid_obj_idx);
}
void recover_bbox(int roi_w, int roi_h, int offset_y,
std::vector<base::ObjectPtr> *objects) {
for (auto &obj : *objects) {
float xmin = obj->camera_supplement.box.xmin;
float ymin = obj->camera_supplement.box.ymin;
float xmax = obj->camera_supplement.box.xmax;
float ymax = obj->camera_supplement.box.ymax;
int x = xmin * roi_w;
int w = (xmax - xmin) * roi_w;
int y = ymin * roi_h + offset_y;
int h = (ymax - ymin) * roi_h;
base::RectF rect_det(x, y, w, h);
base::RectF rect_img(0, 0, roi_w, roi_h + offset_y);
base::RectF rect = rect_det & rect_img;
obj->camera_supplement.box = rect;
double eps = 1e-2;
// Truncation assignment based on bbox positions
if ((ymin < eps) || (ymax >= 1.0 - eps)) {
obj->camera_supplement.truncated_vertical = 0.5;
} else {
obj->camera_supplement.truncated_vertical = 0.0;
}
if ((xmin < eps) || (xmax >= 1.0 - eps)) {
obj->camera_supplement.truncated_horizontal = 0.5;
} else {
obj->camera_supplement.truncated_horizontal = 0.0;
}
obj->camera_supplement.front_box.xmin *= roi_w;
obj->camera_supplement.front_box.ymin *= roi_h;
obj->camera_supplement.front_box.xmax *= roi_w;
obj->camera_supplement.front_box.ymax *= roi_h;
obj->camera_supplement.back_box.xmin *= roi_w;
obj->camera_supplement.back_box.ymin *= roi_h;
obj->camera_supplement.back_box.xmax *= roi_w;
obj->camera_supplement.back_box.ymax *= roi_h;
obj->camera_supplement.front_box.ymin += offset_y;
obj->camera_supplement.front_box.ymax += offset_y;
obj->camera_supplement.back_box.ymin += offset_y;
obj->camera_supplement.back_box.ymax += offset_y;
}
}
void fill_base(base::ObjectPtr obj, const float *bbox) {
obj->camera_supplement.box.xmin = bbox[0];
obj->camera_supplement.box.ymin = bbox[1];
obj->camera_supplement.box.xmax = bbox[2];
obj->camera_supplement.box.ymax = bbox[3];
}
void fill_bbox3d(bool with_box3d, base::ObjectPtr obj, const float *bbox) {
if (with_box3d) {
obj->camera_supplement.alpha = bbox[0];
obj->size[2] = bbox[1];
obj->size[1] = bbox[2];
obj->size[0] = bbox[3];
}
}
void fill_frbox(bool with_frbox, base::ObjectPtr obj, const float *bbox) {
if (with_frbox) {
obj->camera_supplement.front_box.xmin = bbox[0];
obj->camera_supplement.front_box.ymin = bbox[1];
obj->camera_supplement.front_box.xmax = bbox[2];
obj->camera_supplement.front_box.ymax = bbox[3];
obj->camera_supplement.back_box.xmin = bbox[4];
obj->camera_supplement.back_box.ymin = bbox[5];
obj->camera_supplement.back_box.xmax = bbox[6];
obj->camera_supplement.back_box.ymax = bbox[7];
}
}
void fill_lights(bool with_lights, base::ObjectPtr obj, const float *bbox) {
if (with_lights) {
obj->car_light.brake_visible = bbox[0];
obj->car_light.brake_switch_on = bbox[1];
obj->car_light.left_turn_visible = bbox[2];
obj->car_light.left_turn_switch_on = bbox[3];
obj->car_light.right_turn_visible = bbox[4];
obj->car_light.right_turn_switch_on = bbox[5];
}
}
void fill_ratios(bool with_ratios, base::ObjectPtr obj, const float *bbox) {
if (with_ratios) {
// visible ratios of face a/b/c/d
obj->camera_supplement.visible_ratios[0] = bbox[0];
obj->camera_supplement.visible_ratios[1] = bbox[1];
obj->camera_supplement.visible_ratios[2] = bbox[2];
obj->camera_supplement.visible_ratios[3] = bbox[3];
// cut off on width and length (3D)
obj->camera_supplement.cut_off_ratios[0] = bbox[4];
obj->camera_supplement.cut_off_ratios[1] = bbox[5];
// cut off on left and right side (2D)
obj->camera_supplement.cut_off_ratios[2] = bbox[6];
obj->camera_supplement.cut_off_ratios[3] = bbox[7];
}
}
void fill_area_id(bool with_flag, base::ObjectPtr obj, const float *data) {
if (with_flag) {
obj->camera_supplement.area_id = static_cast<int>(data[0]);
// obj->camera_supplement.area_id_prob = data[1];
}
}
int get_area_id(float visible_ratios[4]) {
int area_id = 0;
int max_face = 0;
for (int i = 1; i < 4; ++i) {
if (visible_ratios[i] > visible_ratios[max_face]) {
max_face = i;
}
}
int left_face = (max_face + 1) % 4;
int right_face = (max_face + 3) % 4;
const float eps = 1e-3;
float max_ratio = visible_ratios[max_face];
float left_ratio = visible_ratios[left_face];
float right_ratio = visible_ratios[right_face];
memset(visible_ratios, 0, 4 * sizeof(visible_ratios[0]));
if (left_ratio < eps && right_ratio < eps) {
area_id = (max_face * 2 + 1);
visible_ratios[max_face] = 1.f;
} else if (left_ratio > right_ratio) {
area_id = (max_face * 2 + 2);
auto &&sum_ratio = left_ratio + max_ratio;
visible_ratios[max_face] = max_ratio / sum_ratio;
visible_ratios[left_face] = left_ratio / sum_ratio;
} else {
area_id = (max_face * 2);
if (area_id == 0) {
area_id = 8;
}
auto &&sum_ratio = right_ratio + max_ratio;
visible_ratios[max_face] = max_ratio / sum_ratio;
visible_ratios[right_face] = right_ratio / sum_ratio;
}
return area_id;
}
} // namespace camera
} // namespace perception
} // namespace apollo
|
8e0014a628cf2266f6cfa754ebb5afe25d62e017.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define SRAND_VALUE 1985
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} \
}
#define BLOCK_SIZE 16
#define PRINT 0
#define VERIFY 0
#define SPEEDUP 1
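// Tiled matrix multiplication: each BLOCK_SIZE x BLOCK_SIZE thread block
// walks across the tiles of A and B, staging one tile of each in shared
// memory per iteration and accumulating the partial dot products for its
// output tile. There is no bounds check, so dim must be a multiple of
// BLOCK_SIZE (4096 here).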
__global__ void calcMatrixGPU(int dim, float *matA, float *matB, float *matC)
{
const int cx=blockIdx.x*blockDim.x+threadIdx.x;
const int cy=blockIdx.y*blockDim.y+threadIdx.y;
const int tx=threadIdx.x;
const int ty=threadIdx.y;
__shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
float c_temp=0.0f;
// loop over blocks
for (int l=0;l<gridDim.x; l++)
{
// copy data to shared mem
as[ty][tx]=matA[cy*dim+l*BLOCK_SIZE+tx];
bs[ty][tx]=matB[(l*BLOCK_SIZE+ty)*dim+cx];
__syncthreads();
// now loop over shared mem
for (int k=0;k<BLOCK_SIZE;k++)
c_temp+=as[ty][k]*bs[k][tx];
__syncthreads();
}
matC[cy*dim+cx]=c_temp;
}
__host__ void calcMatrixCPU(int dim, float* matA, float* matB, float* matC)
{
int i, j, k;
float result = 0;
#pragma omp parallel for private(i, j, k, result)
for(j=0; j<dim; j++) {
for(i=0; i<dim; i++) {
result = 0;
for(k=0; k<dim; k++) {
result = result + (matA[(i*dim)+k] * matB[(k*dim)+j]);
}
matC[(i*dim)+j] = result;
}
}
}
__host__ void printMat(int dim, float* mat)
{
int i, j;
for (i=0; i<dim; i++) {
for (j=0; j<dim; j++) {
printf("%8.4f ", mat[(i*dim)+j]);
}
printf("\n");
}
}
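// Host driver: fills two dim x dim matrices with random values in pinned
// host memory, multiplies them on the GPU (timed with hipEvent_t), and can
// optionally time the OpenMP CPU version (SPEEDUP) or verify the GPU result
// against it (VERIFY).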
int main(int argc, char* argv[])
{
int i, j;
//Matrix A
float* h_matA;
float* d_matA;
//Matrix B
float* h_matB;
float* d_matB;
//Matrix C
float* h_matC;
float* d_matC;
//CUDA-Events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int dim = 4*1024;
size_t matBytes = sizeof(float)*dim*dim;
printf("Memory: %f MByte\n", (double) (3*matBytes)/1024/1024);
// Memory allocation on host
CUDA_CHECK_RETURN( hipHostMalloc((void**)&h_matA, matBytes, hipHostMallocDefault) );
CUDA_CHECK_RETURN( hipHostMalloc((void**)&h_matB, matBytes, hipHostMallocDefault) );
CUDA_CHECK_RETURN( hipHostMalloc((void**)&h_matC, matBytes, hipHostMallocDefault) );
// Allocate device matrices
CUDA_CHECK_RETURN( hipMalloc(&d_matA, matBytes) );
CUDA_CHECK_RETURN( hipMalloc(&d_matB, matBytes) );
CUDA_CHECK_RETURN( hipMalloc(&d_matC, matBytes) );
// Assign initial data
srand(SRAND_VALUE);
for(i = 0; i<dim; i++) {
for(j = 0; j<dim; j++) {
h_matA[(i*dim)+j] = rand() % 10;
h_matB[(i*dim)+j] = rand() % 10;
}
}
CUDA_CHECK_RETURN( hipEventRecord(start, 0) );
// Copy data from host to device
CUDA_CHECK_RETURN( hipMemcpy(d_matA, h_matA, matBytes, hipMemcpyHostToDevice) );
CUDA_CHECK_RETURN( hipMemcpy(d_matB, h_matB, matBytes, hipMemcpyHostToDevice) );
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
int linGrid = (int)ceil(dim/(float)BLOCK_SIZE);
dim3 dimGrid(linGrid,linGrid);
hipLaunchKernelGGL(calcMatrixGPU, dimGrid, dimBlock, 0, 0, dim, d_matA, d_matB, d_matC);
CUDA_CHECK_RETURN( hipDeviceSynchronize()); // Wait for the GPU launched work
CUDA_CHECK_RETURN( hipGetLastError());
// Copy data from device to host
CUDA_CHECK_RETURN( hipMemcpy(h_matC, d_matC, matBytes, hipMemcpyDeviceToHost) );
CUDA_CHECK_RETURN( hipEventRecord(stop, 0) );
CUDA_CHECK_RETURN( hipEventSynchronize(stop) );
float runtime_gpu;
hipEventElapsedTime(&runtime_gpu, start, stop);
printf("\nElapsed GPU time: %8.2f ms\n", runtime_gpu);
#if SPEEDUP
CUDA_CHECK_RETURN( hipEventRecord(start, 0) );
calcMatrixCPU(dim, h_matA, h_matB, h_matC);
CUDA_CHECK_RETURN( hipEventRecord(stop, 0) );
CUDA_CHECK_RETURN( hipEventSynchronize(stop) );
float runtime_cpu;
hipEventElapsedTime(&runtime_cpu, start, stop);
printf("\nElapsed CPU time: %8.2f ms\n", runtime_cpu);
printf("\nSpeedup: %8.2f\n", runtime_cpu/runtime_gpu);
#endif
#if VERIFY
float* h_verify;
CUDA_CHECK_RETURN( hipHostMalloc((void**)&h_verify, matBytes, hipHostMallocDefault) );
calcMatrixCPU(dim, h_matA, h_matB, h_verify);
int correct = 0;
for(i = 0; i < (dim*dim); i++) {
if (h_matC[i] != h_verify[i]){
printf("Error: %8.4f - expected: %8.4f @( %i, %i)\n", h_matC[i], h_verify[i], i/dim, i%dim);
break;
} else {
correct++;
}
}
if(correct == (dim*dim))
printf("Correct results\n");
CUDA_CHECK_RETURN( hipHostFree(h_verify) );
#endif
#if PRINT
printf("Matrix A:\n");
printMat(dim, h_matA);
printf("\nMatrix B:\n");
printMat(dim, h_matB);
printf("\nMatrix C:\n");
printMat(dim, h_matC);
#endif
// Release memory
CUDA_CHECK_RETURN( hipHostFree(h_matA) );
CUDA_CHECK_RETURN( hipHostFree(h_matB) );
CUDA_CHECK_RETURN( hipHostFree(h_matC) );
CUDA_CHECK_RETURN( hipFree(d_matA) );
CUDA_CHECK_RETURN( hipFree(d_matB) );
CUDA_CHECK_RETURN( hipFree(d_matC) );
return 0;
}
| 8e0014a628cf2266f6cfa754ebb5afe25d62e017.cu | #include <stdio.h>
#include <stdlib.h>
#define SRAND_VALUE 1985
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} \
}
#define BLOCK_SIZE 16
#define PRINT 0
#define VERIFY 0
#define SPEEDUP 1
__global__ void calcMatrixGPU(int dim, float *matA, float *matB, float *matC)
{
const int cx=blockIdx.x*blockDim.x+threadIdx.x;
const int cy=blockIdx.y*blockDim.y+threadIdx.y;
const int tx=threadIdx.x;
const int ty=threadIdx.y;
__shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
float c_temp=0.0f;
// loop over blocks
for (int l=0;l<gridDim.x; l++)
{
// copy data to shared mem
as[ty][tx]=matA[cy*dim+l*BLOCK_SIZE+tx];
bs[ty][tx]=matB[(l*BLOCK_SIZE+ty)*dim+cx];
__syncthreads();
// now loop over shared mem
for (int k=0;k<BLOCK_SIZE;k++)
c_temp+=as[ty][k]*bs[k][tx];
__syncthreads();
}
matC[cy*dim+cx]=c_temp;
}
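/* Added illustrative note (not part of the original source): with BLOCK_SIZE = 16 and dim = 4*1024,
   main launches this kernel as a 256x256 grid of 16x16 blocks; each block streams gridDim.x = 256
   tiles of A and B through shared memory, so every element of A and B is fetched from global memory
   dim/BLOCK_SIZE = 256 times instead of dim = 4096 times as in a naive per-thread dot product. */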
__host__ void calcMatrixCPU(int dim, float* matA, float* matB, float* matC)
{
int i, j, k;
float result = 0;
#pragma omp parallel for private(i, j, k, result)
for(j=0; j<dim; j++) {
for(i=0; i<dim; i++) {
result = 0;
for(k=0; k<dim; k++) {
result = result + (matA[(i*dim)+k] * matB[(k*dim)+j]);
}
matC[(i*dim)+j] = result;
}
}
}
__host__ void printMat(int dim, float* mat)
{
int i, j;
for (i=0; i<dim; i++) {
for (j=0; j<dim; j++) {
printf("%8.4f ", mat[(i*dim)+j]);
}
printf("\n");
}
}
int main(int argc, char* argv[])
{
int i, j;
//Matrix A
float* h_matA;
float* d_matA;
//Matrix B
float* h_matB;
float* d_matB;
//Matrix C
float* h_matC;
float* d_matC;
//CUDA-Events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int dim = 4*1024;
size_t matBytes = sizeof(float)*dim*dim;
printf("Memory: %f MByte\n", (double) (3*matBytes)/1024/1024);
// Memory allocation on host
CUDA_CHECK_RETURN( cudaHostAlloc((void**)&h_matA, matBytes, cudaHostAllocDefault) );
CUDA_CHECK_RETURN( cudaHostAlloc((void**)&h_matB, matBytes, cudaHostAllocDefault) );
CUDA_CHECK_RETURN( cudaHostAlloc((void**)&h_matC, matBytes, cudaHostAllocDefault) );
// Allocate device memory
CUDA_CHECK_RETURN( cudaMalloc(&d_matA, matBytes) );
CUDA_CHECK_RETURN( cudaMalloc(&d_matB, matBytes) );
CUDA_CHECK_RETURN( cudaMalloc(&d_matC, matBytes) );
// Assign initial data
srand(SRAND_VALUE);
for(i = 0; i<dim; i++) {
for(j = 0; j<dim; j++) {
h_matA[(i*dim)+j] = rand() % 10;
h_matB[(i*dim)+j] = rand() % 10;
}
}
CUDA_CHECK_RETURN( cudaEventRecord(start, 0) );
// Copy data from host to device
CUDA_CHECK_RETURN( cudaMemcpy(d_matA, h_matA, matBytes, cudaMemcpyHostToDevice) );
CUDA_CHECK_RETURN( cudaMemcpy(d_matB, h_matB, matBytes, cudaMemcpyHostToDevice) );
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
int linGrid = (int)ceil(dim/(float)BLOCK_SIZE);
dim3 dimGrid(linGrid,linGrid);
calcMatrixGPU<<<dimGrid, dimBlock>>>(dim, d_matA, d_matB, d_matC);
CUDA_CHECK_RETURN( cudaThreadSynchronize()); // Wait for the GPU launched work
CUDA_CHECK_RETURN( cudaGetLastError());
// Copy data from device to host
CUDA_CHECK_RETURN( cudaMemcpy(h_matC, d_matC, matBytes, cudaMemcpyDeviceToHost) );
CUDA_CHECK_RETURN( cudaEventRecord(stop, 0) );
CUDA_CHECK_RETURN( cudaEventSynchronize(stop) );
float runtime_gpu;
cudaEventElapsedTime(&runtime_gpu, start, stop);
printf("\nElapsed GPU time: %8.2f ms\n", runtime_gpu);
#if SPEEDUP
CUDA_CHECK_RETURN( cudaEventRecord(start, 0) );
calcMatrixCPU(dim, h_matA, h_matB, h_matC);
CUDA_CHECK_RETURN( cudaEventRecord(stop, 0) );
CUDA_CHECK_RETURN( cudaEventSynchronize(stop) );
float runtime_cpu;
cudaEventElapsedTime(&runtime_cpu, start, stop);
printf("\nElapsed CPU time: %8.2f ms\n", runtime_cpu);
printf("\nSpeedup: %8.2f\n", runtime_cpu/runtime_gpu);
#endif
#if VERIFY
float* h_verify;
CUDA_CHECK_RETURN( cudaHostAlloc((void**)&h_verify, matBytes, cudaHostAllocDefault) );
calcMatrixCPU(dim, h_matA, h_matB, h_verify);
int correct = 0;
for(i = 0; i < (dim*dim); i++) {
if (h_matC[i] != h_verify[i]){
printf("Error: %8.4f - expected: %8.4f @( %i, %i)\n", h_matC[i], h_verify[i], i/dim, i%dim);
break;
} else {
correct++;
}
}
if(correct == (dim*dim))
printf("Correct results\n");
CUDA_CHECK_RETURN( cudaFreeHost(h_verify) );
#endif
#if PRINT
printf("Matrix A:\n");
printMat(dim, h_matA);
printf("\nMatrix B:\n");
printMat(dim, h_matB);
printf("\nMatrix C:\n");
printMat(dim, h_matC);
#endif
// Release memory
CUDA_CHECK_RETURN( cudaFreeHost(h_matA) );
CUDA_CHECK_RETURN( cudaFreeHost(h_matB) );
CUDA_CHECK_RETURN( cudaFreeHost(h_matC) );
CUDA_CHECK_RETURN( cudaFree(d_matA) );
CUDA_CHECK_RETURN( cudaFree(d_matB) );
CUDA_CHECK_RETURN( cudaFree(d_matC) );
return 0;
}
|
38d81d9e87bca4c22c25185d525db3ebd634e5f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
struct coo
{
int base_id;
int query_id;
float distance;
float for_align;
};
__global__ void Kernel(unsigned int * ptr){
atomicAdd(ptr, 1);
}
int main()
{
size_t batch_len = 8192;
size_t data_num = 900000;
size_t data_dim = 34;
// Check device status
int device = 0;
hipSetDevice(device);
printf("device checked\n");
// Pre-allocate space
float *data_m, *result_m, *module_m;
coo *output_m;
unsigned int *take_num_m;
size_t data_size = data_num * data_dim * sizeof(float);
// _m means managed
hipMallocManaged((void **)&data_m, data_size); // Raw input data
// memcpy(data_m, node_data, data_size);
// hipMemPrefetchAsync(data_d,data_size,0,NULL);
hipMallocManaged((void **)&module_m, data_num * sizeof(float)); // Squared-norm data
// hipMemPrefetchAsync(module_d,data_num*sizeof(float),0,NULL);
hipMallocManaged((void **)&result_m, batch_len * batch_len * sizeof(float)); // Distance results
// hipMemPrefetchAsync(result_d,batch_num*batch_num*sizeof(float),0,NULL);
hipMallocManaged((void **)&output_m, batch_len * batch_len * sizeof(coo)); // Output coo data
// hipMemPrefetchAsync(output_d,batch_num*batch_num*sizeof(coo),0,NULL);
hipMallocManaged((void **)&take_num_m, sizeof(int)); // Number of edges taken
// hipMemPrefetchAsync(take_num_m,sizeof(int),0,NULL);
hipLaunchKernelGGL(( Kernel), dim3(223),dim3(14), 0, 0, take_num_m);
printf("pre-allocation done.\n");
// Release memory
hipFree(data_m);
hipFree(result_m);
hipFree(module_m);
hipFree(output_m);
hipFree(take_num_m);
return 0;
}
| 38d81d9e87bca4c22c25185d525db3ebd634e5f8.cu | #include "cuda_runtime.h"
#include "stdio.h"
struct coo
{
int base_id;
int query_id;
float distance;
float for_align;
};
__global__ void Kernel(unsigned int * ptr){
atomicAdd(ptr, 1);
}
int main()
{
size_t batch_len = 8192;
size_t data_num = 900000;
size_t data_dim = 34;
// Check device status
int device = 0;
cudaSetDevice(device);
printf("device checked\n");
// Pre-allocate space
float *data_m, *result_m, *module_m;
coo *output_m;
unsigned int *take_num_m;
size_t data_size = data_num * data_dim * sizeof(float);
// _m means managed
cudaMallocManaged((void **)&data_m, data_size); // Raw input data
// memcpy(data_m, node_data, data_size);
// cudaMemPrefetchAsync(data_d,data_size,0,NULL);
cudaMallocManaged((void **)&module_m, data_num * sizeof(float)); // Squared-norm data
// cudaMemPrefetchAsync(module_d,data_num*sizeof(float),0,NULL);
cudaMallocManaged((void **)&result_m, batch_len * batch_len * sizeof(float)); // Distance results
// cudaMemPrefetchAsync(result_d,batch_num*batch_num*sizeof(float),0,NULL);
cudaMallocManaged((void **)&output_m, batch_len * batch_len * sizeof(coo)); // Output coo data
// cudaMemPrefetchAsync(output_d,batch_num*batch_num*sizeof(coo),0,NULL);
cudaMallocManaged((void **)&take_num_m, sizeof(int)); // Number of edges taken
// cudaMemPrefetchAsync(take_num_m,sizeof(int),0,NULL);
Kernel<<<223,14>>>(take_num_m);
printf("pre-allocation done.\n");
// Release memory
cudaFree(data_m);
cudaFree(result_m);
cudaFree(module_m);
cudaFree(output_m);
cudaFree(take_num_m);
return 0;
}
|
4312e070001496ebc9600a5bfcfd522a49bdc85c.hip | // !!! This is a file automatically generated by hipify!!!
#include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
typedef pointb<3> rgb;
int main(int argc, char *argv[]) {
// Load the image
if (argc < 2){
printf("please input a 3 channel(rbg) image path");
return -1;
}
auto ts_rgb = read_rgb_image(argv[1]);
// Choose whether to use CUDA
#ifdef USE_ROCM
auto gts_rgb = mem_clone(ts_rgb, device_tag{});
#else
auto &gts_rgb = ts_rgb;
#endif
// Normalize the image pixels
auto glts_rgb_shift_zero = gts_rgb - rgb::all(128);
auto glts_rgb_stride = stride(glts_rgb_shift_zero, 2);
auto glts_rgb_normalized = cast<pointf<3>>(glts_rgb_stride) / pointf<3>::all(128.0f);
// Nothing has actually been computed so far; persist() fuses the operations above and writes the result to memory, avoiding extra memory overhead
auto gts_rgb_normalized = glts_rgb_normalized.persist();
#ifdef USE_ROCM
hip::device_synchronize();
auto ts_rgb_normalized = mem_clone(gts_rgb_normalized, host_tag{});
#else
auto &ts_rgb_normalized = gts_rgb_normalized;
#endif
// Define the image data for the three channels
tensor<float, 2> ts_red(ts_rgb_normalized.shape());
tensor<float, 2> ts_green(ts_rgb_normalized.shape());
tensor<float, 2> ts_blue(ts_rgb_normalized.shape());
// The zip operation returns tuples whose elements are references to the corresponding elements of the three channel tensors above
auto ts_zip_rgb = zip(ts_red, ts_green, ts_blue);
// Allow the tuple elements and point<byte, 3> to be converted to each other
auto ts_zip_point = point_view(ts_zip_rgb);
// Copy the result into ts_red, ts_green, ts_blue, since the elements of ts_zip_point are references to these three channels
copy(ts_rgb_normalized, ts_zip_point);
// Save the raw data
auto output_red_path = argc < 3 ? "red.raw_data" : argv[2];
auto output_green_path = argc < 4 ? "green.raw_data" : argv[3];
auto output_blue_path = argc < 5 ? "blue.raw_data" : argv[4];
io::write_raw_data(output_red_path, ts_red);
io::write_raw_data(output_green_path, ts_green);
io::write_raw_data(output_blue_path, ts_blue);
return 0;
}
| 4312e070001496ebc9600a5bfcfd522a49bdc85c.cu | #include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
typedef pointb<3> rgb;
int main(int argc, char *argv[]) {
// Load the image
if (argc < 2){
printf("please input a 3 channel(rbg) image path");
return -1;
}
auto ts_rgb = read_rgb_image(argv[1]);
// Choose whether to use CUDA
#ifdef USE_CUDA
auto gts_rgb = mem_clone(ts_rgb, device_tag{});
#else
auto &gts_rgb = ts_rgb;
#endif
// Normalize the image pixels
auto glts_rgb_shift_zero = gts_rgb - rgb::all(128);
auto glts_rgb_stride = stride(glts_rgb_shift_zero, 2);
auto glts_rgb_normalized = cast<pointf<3>>(glts_rgb_stride) / pointf<3>::all(128.0f);
// Nothing has actually been computed so far; persist() fuses the operations above and writes the result to memory, avoiding extra memory overhead
auto gts_rgb_normalized = glts_rgb_normalized.persist();
#ifdef USE_CUDA
cuda::device_synchronize();
auto ts_rgb_normalized = mem_clone(gts_rgb_normalized, host_tag{});
#else
auto &ts_rgb_normalized = gts_rgb_normalized;
#endif
// Define the image data for the three channels
tensor<float, 2> ts_red(ts_rgb_normalized.shape());
tensor<float, 2> ts_green(ts_rgb_normalized.shape());
tensor<float, 2> ts_blue(ts_rgb_normalized.shape());
// The zip operation returns tuples whose elements are references to the corresponding elements of the three channel tensors above
auto ts_zip_rgb = zip(ts_red, ts_green, ts_blue);
// Allow the tuple elements and point<byte, 3> to be converted to each other
auto ts_zip_point = point_view(ts_zip_rgb);
// Copy the result into ts_red, ts_green, ts_blue, since the elements of ts_zip_point are references to these three channels
copy(ts_rgb_normalized, ts_zip_point);
// Save the raw data
auto output_red_path = argc < 3 ? "red.raw_data" : argv[2];
auto output_green_path = argc < 4 ? "green.raw_data" : argv[3];
auto output_blue_path = argc < 5 ? "blue.raw_data" : argv[4];
io::write_raw_data(output_red_path, ts_red);
io::write_raw_data(output_green_path, ts_green);
io::write_raw_data(output_blue_path, ts_blue);
return 0;
}
|
944a5b95623e2f91522f20166894e8757237c76c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************
Authors: Pedro Macedo Flores and Hudson Braga Vieira
Project: Batch merge path sort
Sorbonne Université - Master 2
Massive parallel programming on GPU devices for Big Data
Paris, March 2021
*******************************************/
#include <cuda_device_runtime_api.h>
#include <iostream>
#include <ostream>
#include <string>
#include "utils.h"
// Has to be defined in the compilation in order to get the correct value of the
// macros __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
__device__ int pow(int b, int i){
int val = 1;
for (int j = 1; j <= i; j++){
val *= b;
}
return val;
}
__device__ void trifusion(int * a, int* b, int * sol, int modA, int modB, int idx){
int * K = new int[2];
int * P = new int[2];
int * Q = new int[2];
// initial setup
bool aux1 = (idx > modA);
K[0]= P[1] = aux1* (idx-modA);
K[1]= P[0] = aux1*modA + (1-aux1)*idx;
bool loop_bool = true;
while(loop_bool){
/*********************
set Q position after K or P move following binary search.
( P moves 1 segment below Q, or K moves 1 segment above Q, if the break condition is not met yet)
*********************/
// mid distance between K and P
int offset = abs(K[1]-P[1])/2;
// midpoint in diagonal
Q[0]= K[0]+offset;
Q[1]= K[1]-offset;
/********************
P moves one segment below Q in schema 1, 1 (bottom left = 1)
K moves one segment above Q in schema 0, 0 (upper right = 0)
break condition: schema 0, 1
*********************/
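/* Added note for clarity: K, P and Q all satisfy Q[0] + Q[1] == idx, i.e. the thread searches
   along the anti-diagonal of the merge grid that corresponds to its output position, and
   |K[1] - P[1]| is roughly halved every pass, so this loop terminates after
   O(log2(modA + modB)) iterations. */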
bool bottom_left = (Q[1]>=0)*(Q[0] <= modB)*(!((a[min(Q[1],modA-1)] <= b[max(Q[0]-1,0)])*(Q[0] !=0)* (Q[1] != modA)));
bool upper_right = !((Q[0]!= modB)* (Q[1]!=0) * (a[max(Q[1]-1,0)] > b[min(Q[0],modB-1)]));
// in break condition, tells if upper left is 0 or 1.
bool from_upper_or_left = (Q[1] < modA)* (!((Q[0]!=modB)*(a[min(Q[1],modA-1)] > b[min(Q[0],modB-1)])));
P[0] = (!bottom_left)*(Q[0]-1) + bottom_left*P[0];
P[1] = (!bottom_left)*(Q[1]+1) + bottom_left*P[1];
K[0] = bottom_left* (!upper_right) * (Q[0]+1) + (!(bottom_left* (!upper_right)))*K[0];
K[1] = bottom_left* (!upper_right) *(Q[1]-1) + (!(bottom_left* (!upper_right)))*K[1];
// only really updates in schema 0,1
sol[idx]= bottom_left * upper_right* (from_upper_or_left*a[min(Q[1],modA-1)] + (!from_upper_or_left)*b[min(Q[0],modB-1)]);
loop_bool = !(upper_right* bottom_left);
}
delete [] K;
delete [] P;
delete [] Q;
}
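/* Illustrative usage sketch (added, not part of the original code): one thread per output element
   merges two sorted arrays, e.g. a = {1,3,5}, b = {2,4,6} with modA = modB = 3 yields
   sol = {1,2,3,4,5,6} when idx covers 0..5, which is exactly what trifusion_kernel_test below
   does with threadIdx.x. */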
__global__ void trifusion_kernel_test(int * a, int* b, int * sol, int modA, int modB){
trifusion(a, b, sol, modA, modB, threadIdx.x);
}
void trifusion_test(void){
int M = 2;
int modA= 1 , modB = M-modA;
int maxAB = (modA > modB)? modA : modB;
// random sorted vectors
int * a = rand_int_array_sorted(modA);
int * b = rand_int_array_sorted(modB);
int * aGPU, *bGPU, * solGPU, *solCPU = new int[M];
// memory alloc
testCUDA(hipMalloc(&aGPU, maxAB*sizeof(int)));
testCUDA(hipMalloc(&bGPU, maxAB*sizeof(int)));
testCUDA(hipMalloc(&solGPU, M*sizeof(int)));
/***********************
CPU run
************************/
Timer timer;
timer.start();
int * sol = merge_sequential(a, b, modA, modB);
timer.add();
if(check_solution(sol, a, b, modA, modB)) std::cout << "Sequential solution OK" << std::endl;
else std::cout << "Sequential solution Wrong" << std::endl;
std::cout << "Elapsed CPU time: " << timer.getsum()*1000 << " ms" << std::endl << std::endl;
/***********************
GPU run
************************/
testCUDA(hipMemcpy(aGPU,a, modA * sizeof(int), hipMemcpyHostToDevice));
testCUDA(hipMemcpy(bGPU,b, modB * sizeof(int), hipMemcpyHostToDevice));
// timer block
float TimeVar;
hipEvent_t start, stop;
testCUDA(hipEventCreate(&start));
testCUDA(hipEventCreate(&stop));
testCUDA(hipEventRecord(start,0));
// timer block
// execution block
hipLaunchKernelGGL(( trifusion_kernel_test), dim3(1), dim3(M), 0, 0, aGPU, bGPU, solGPU, modA, modB);
// execution block
//timer block
testCUDA(hipEventRecord(stop,0));
testCUDA(hipEventSynchronize(stop));
testCUDA(hipEventElapsedTime(&TimeVar, start, stop));
// timer block
testCUDA(hipMemcpy(solCPU, solGPU, M * sizeof(int), hipMemcpyDeviceToHost));
// print results
f(i, modA){
std::cout << a[i] << "\t" ;
}
std::cout<< std::endl;
f(i, modB){
std::cout << b[i] << "\t" ;
}
std::cout<< std::endl;
f(i, M){
std::cout << solCPU[i] << "\t" ;
}
std::cout<< std::endl;
if(check_solution(solCPU, a, b, modA, modB)) std::cout << "Parallel solution OK" << std::endl;
else std::cout << "Parallel solution Wrong" << std::endl;
std::cout << "Elapsed GPU time: " << TimeVar << " ms" << std::endl << std::endl;
/***********************
Memory Free
***********************/
testCUDA(hipFree(aGPU));
testCUDA(hipFree(bGPU));
testCUDA(hipFree(solGPU));
// memory free
delete [] a;
delete [] b;
delete [] sol;
delete [] solCPU;
}
__global__ void kernel_batch_sort_shared(int *M, int i, int d){
int size = ((int) pow(2,i));
extern __shared__ int A[];
int offset = (threadIdx.x /(2*size)) * 2*size;
// device function
A[threadIdx.x+(i%2)*d]= M[threadIdx.x+(i%2)*d];
__syncthreads();
trifusion(A+offset+(i%2)*d,A+offset+(i%2)*d+size, A+offset+(!(i%2))*d, size, size, threadIdx.x%(2*size));
M[(!(i%2))*d + threadIdx.x]=A[threadIdx.x+(!(i%2))*d];
}
__global__ void kernel_batch_sort(int * M, int i, int mul, int d){
// which sort array?
int k = (int) blockIdx.x/mul;
//printf("%d\n", blockIdx.x % mul);
// which sizes of A and B?
int size = ((int) pow(2,i));
// thread 2 of the second block must represent thread 1025 of a virtual "superblock" (a superblock is mul blocks taken together)
int intermediate_threadIdx = (blockIdx.x % mul) * blockDim.x + threadIdx.x ;
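// Worked example (added): with d = 2048 and 1024 threads per block, mul = 2; blockIdx.x = 1,
// threadIdx.x = 1 gives intermediate_threadIdx = 1*1024 + 1 = 1025, i.e. that thread acts as
// thread 1025 of the 2048-thread virtual superblock handling one sorted vector.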
// which merge? find offset of M corresponding to A and B
int offset = k*2*d + (intermediate_threadIdx /((int) pow(2, (i+1)))) * pow(2, i+1);
int idx_start_a = offset + (i%2)*d;
int idx_start_b = idx_start_a + size;
int m = intermediate_threadIdx % ((int) pow(2, (i+1)));
// device function
trifusion(M+ idx_start_a, M+idx_start_b, M+offset + (!(i%2))*d, size, size, m);
}
void batch_sort(int d, int batch_dim, int max_threads_per_block,bool shared){
// store on GPU a vector M of size 2 * batch_dim * d
// copy each vector j to A[j][0 .. d-1] (setting A[j][d .. 2d-1] to 0)
// A[batch_id][0 .. d-1] keeps the old values and A[batch_id][d .. 2d-1] the new ones, or vice versa, alternating via the i%2 trick
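// Worked example (added): with batch_dim = 1 and d = 4, M holds 8 ints; pass i = 0 merges runs of
// length 1 from M[0..3] into M[4..7], pass i = 1 merges runs of length 2 from M[4..7] back into
// M[0..3], so after log2(d) = 2 passes the sorted vector ends up in the half selected by the
// parity of the last pass.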
int * mCPU = rand_int_array(2*d*batch_dim);
int * mSOL = new int[2*d*batch_dim];
// print result
/*
f(i, 2*d*batch_dim){
std::cout << mCPU[i] << "\t";
}
std::cout<< std::endl;*/
int * mGPU;
testCUDA(hipMalloc(&mGPU,2*d*batch_dim*sizeof(int)));
testCUDA(hipMemcpy(mGPU,mCPU, 2*d*batch_dim*sizeof(int), hipMemcpyHostToDevice));
// inplace sort. mCPU will be used in the future to compare sol from GPU
cpu_batch_sort(mCPU, d , batch_dim);
int mul = (d>max_threads_per_block)? (d / max_threads_per_block) : 1;
// timer block
float TimeVar;
hipEvent_t start, stop;
if (shared){
testCUDA(hipEventCreate(&start));
testCUDA(hipEventCreate(&stop));
testCUDA(hipEventRecord(start,0));
// timer block
// execution block
f(i, ((int) (log(d)/ log(2)))){
// for each vector to sort there are 2**(log2(d) - i - 1) merges to do; each merge takes 2**(i+1) threads => always d threads in total
//kernel_batch_sort<<< batch_dim*mul, (d > max_threads_per_block)? max_threads_per_block: d >>> (mGPU, i,mul, d);
hipLaunchKernelGGL(( kernel_batch_sort_shared), dim3(1), dim3(d) ,2*d*sizeof(int), 0, mGPU, i, d);
}
// execution block
//timer block
testCUDA(hipEventRecord(stop,0));
testCUDA(hipEventSynchronize(stop));
testCUDA(hipEventElapsedTime(&TimeVar, start, stop));
} else{
testCUDA(hipEventCreate(&start));
testCUDA(hipEventCreate(&stop));
testCUDA(hipEventRecord(start,0));
// timer block
// execution block
f(i, ((int) (log(d)/ log(2)))){
// for each vector to sort there are 2**(log2(d) - i - 1) merges to do; each merge takes 2**(i+1) threads => always d threads in total
hipLaunchKernelGGL(( kernel_batch_sort), dim3(batch_dim*mul), dim3((d > max_threads_per_block)? max_threads_per_block: d) , 0, 0, mGPU, i,mul, d);
//kernel_batch_sort_shared<<< batch_dim*mul, (d > max_threads_per_block)? max_threads_per_block: d ,2*d*sizeof(int)>>> (mGPU, i, d);
}
// execution block
//timer block
testCUDA(hipEventRecord(stop,0));
testCUDA(hipEventSynchronize(stop));
testCUDA(hipEventElapsedTime(&TimeVar, start, stop));
}
// timer block
testCUDA(hipMemcpy(mSOL, mGPU, 2*d*batch_dim*sizeof(int), hipMemcpyDeviceToHost));
// print result
/*
f(i, 2*d*batch_dim){
std::cout << mSOL[i] << "\t";
}
std::cout<< std::endl;*/
if(check_solution_batch(mCPU, mSOL, d, batch_dim)) std::cout << "Parallel solution OK" << std::endl;
else std::cout << "Parallel solution Wrong" << std::endl;
std::cout << "Elapsed GPU time: " << TimeVar << " ms" << std::endl << std::endl;
// memory free
testCUDA(hipFree(mGPU));
delete [] mCPU;
delete [] mSOL;
}
int main(int argc, char * argv[]){
// cin and cout as fast as printf
std::ios_base::sync_with_stdio(false);
// function to test merge algorithm. Tested
//trifusion_test();
int d = 4;
int batch_dim = 1;
bool shared = false;
if(argc==3){
d = std::stoi(argv[1]);
batch_dim= std::stoi(argv[2]);
}
if (d <= 1024 && batch_dim == 1){
char c;
printf("Use shared memory? (y/n): ");
scanf("%c", &c);
if (c=='y'){
shared = true;
}
}
if(isPowerOfTwo(d)){
// check the number of SM and the parameters given
hipDeviceProp_t prop;
testCUDA(hipGetDeviceProperties(&prop,0));
std::cout << "GPU informations " << std::endl;
std::cout << "-----------------" << std::endl;
std::cout << "Max threads per block: " << prop.maxThreadsPerBlock << std::endl;
std::cout << "SM count: " << prop.multiProcessorCount << std::endl << std::endl;
int mul = (d>prop.maxThreadsPerBlock)? (d / prop.maxThreadsPerBlock) : 1;
if(mul*batch_dim > prop.multiProcessorCount){
std::cout << "WARNING: number of blocks greater than GPU SM count" << std::endl << std::endl;
}
batch_sort(d,batch_dim, prop.maxThreadsPerBlock,shared);
}else{
std::cout << "ABORTED: d is not power of 2" << std::endl;
}
return 0;
}
| 944a5b95623e2f91522f20166894e8757237c76c.cu | /******************************************
Authors: Pedro Macedo Flores and Hudson Braga Vieira
Project: Batch merge path sort
Sorbonne Université - Master 2
Massive parallel programming on GPU devices for Big Data
Paris, March 2021
*******************************************/
#include <cuda_device_runtime_api.h>
#include <iostream>
#include <ostream>
#include <string>
#include "utils.h"
// Has to be defined in the compilation in order to get the correct value of the
// macros __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
__device__ int pow(int b, int i){
int val = 1;
for (int j = 1; j <= i; j++){
val *= b;
}
return val;
}
__device__ void trifusion(int * a, int* b, int * sol, int modA, int modB, int idx){
int * K = new int[2];
int * P = new int[2];
int * Q = new int[2];
// initial setup
bool aux1 = (idx > modA);
K[0]= P[1] = aux1* (idx-modA);
K[1]= P[0] = aux1*modA + (1-aux1)*idx;
bool loop_bool = true;
while(loop_bool){
/*********************
set Q position after K or P move following binary search.
( P moves 1 segment below Q, or K moves 1 segment above Q, if the break condition is not met yet)
*********************/
// mid distance between K and P
int offset = abs(K[1]-P[1])/2;
// midpoint in diagonal
Q[0]= K[0]+offset;
Q[1]= K[1]-offset;
/********************
P moves one segment below Q in schema 1, 1 (bottom left = 1)
K moves one segment above Q in schema 0, 0 (upper right = 0)
break condition: schema 0, 1
*********************/
bool bottom_left = (Q[1]>=0)*(Q[0] <= modB)*(!((a[min(Q[1],modA-1)] <= b[max(Q[0]-1,0)])*(Q[0] !=0)* (Q[1] != modA)));
bool upper_right = !((Q[0]!= modB)* (Q[1]!=0) * (a[max(Q[1]-1,0)] > b[min(Q[0],modB-1)]));
// in break condition, tells if upper left is 0 or 1.
bool from_upper_or_left = (Q[1] < modA)* (!((Q[0]!=modB)*(a[min(Q[1],modA-1)] > b[min(Q[0],modB-1)])));
P[0] = (!bottom_left)*(Q[0]-1) + bottom_left*P[0];
P[1] = (!bottom_left)*(Q[1]+1) + bottom_left*P[1];
K[0] = bottom_left* (!upper_right) * (Q[0]+1) + (!(bottom_left* (!upper_right)))*K[0];
K[1] = bottom_left* (!upper_right) *(Q[1]-1) + (!(bottom_left* (!upper_right)))*K[1];
// only really updates in schema 0,1
sol[idx]= bottom_left * upper_right* (from_upper_or_left*a[min(Q[1],modA-1)] + (!from_upper_or_left)*b[min(Q[0],modB-1)]);
loop_bool = !(upper_right* bottom_left);
}
delete [] K;
delete [] P;
delete [] Q;
}
__global__ void trifusion_kernel_test(int * a, int* b, int * sol, int modA, int modB){
trifusion(a, b, sol, modA, modB, threadIdx.x);
}
void trifusion_test(void){
int M = 2;
int modA= 1 , modB = M-modA;
int maxAB = (modA > modB)? modA : modB;
// random sorted vectors
int * a = rand_int_array_sorted(modA);
int * b = rand_int_array_sorted(modB);
int * aGPU, *bGPU, * solGPU, *solCPU = new int[M];
// memory alloc
testCUDA(cudaMalloc(&aGPU, maxAB*sizeof(int)));
testCUDA(cudaMalloc(&bGPU, maxAB*sizeof(int)));
testCUDA(cudaMalloc(&solGPU, M*sizeof(int)));
/***********************
CPU run
************************/
Timer timer;
timer.start();
int * sol = merge_sequential(a, b, modA, modB);
timer.add();
if(check_solution(sol, a, b, modA, modB)) std::cout << "Sequential solution OK" << std::endl;
else std::cout << "Sequential solution Wrong" << std::endl;
std::cout << "Elapsed CPU time: " << timer.getsum()*1000 << " ms" << std::endl << std::endl;
/***********************
GPU run
************************/
testCUDA(cudaMemcpy(aGPU,a, modA * sizeof(int), cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(bGPU,b, modB * sizeof(int), cudaMemcpyHostToDevice));
// timer block
float TimeVar;
cudaEvent_t start, stop;
testCUDA(cudaEventCreate(&start));
testCUDA(cudaEventCreate(&stop));
testCUDA(cudaEventRecord(start,0));
// timer block
// execution block
trifusion_kernel_test<<<1, M>>>(aGPU, bGPU, solGPU, modA, modB);
// execution block
//timer block
testCUDA(cudaEventRecord(stop,0));
testCUDA(cudaEventSynchronize(stop));
testCUDA(cudaEventElapsedTime(&TimeVar, start, stop));
// timer block
testCUDA(cudaMemcpy(solCPU, solGPU, M * sizeof(int), cudaMemcpyDeviceToHost));
// print results
f(i, modA){
std::cout << a[i] << "\t" ;
}
std::cout<< std::endl;
f(i, modB){
std::cout << b[i] << "\t" ;
}
std::cout<< std::endl;
f(i, M){
std::cout << solCPU[i] << "\t" ;
}
std::cout<< std::endl;
if(check_solution(solCPU, a, b, modA, modB)) std::cout << "Parallel solution OK" << std::endl;
else std::cout << "Parallel solution Wrong" << std::endl;
std::cout << "Elapsed GPU time: " << TimeVar << " ms" << std::endl << std::endl;
/***********************
Memory Free
***********************/
testCUDA(cudaFree(aGPU));
testCUDA(cudaFree(bGPU));
testCUDA(cudaFree(solGPU));
// memory free
delete [] a;
delete [] b;
delete [] sol;
delete [] solCPU;
}
__global__ void kernel_batch_sort_shared(int *M, int i, int d){
int size = ((int) pow(2,i));
extern __shared__ int A[];
int offset = (threadIdx.x /(2*size)) * 2*size;
// device function
A[threadIdx.x+(i%2)*d]= M[threadIdx.x+(i%2)*d];
__syncthreads();
trifusion(A+offset+(i%2)*d,A+offset+(i%2)*d+size, A+offset+(!(i%2))*d, size, size, threadIdx.x%(2*size));
M[(!(i%2))*d + threadIdx.x]=A[threadIdx.x+(!(i%2))*d];
}
__global__ void kernel_batch_sort(int * M, int i, int mul, int d){
// which sort array?
int k = (int) blockIdx.x/mul;
//printf("%d\n", blockIdx.x % mul);
// which sizes of A and B?
int size = ((int) pow(2,i));
// thread 2 of the second block must represent thread 1025 of a virtual "superblock" (a superblock is mul blocks taken together)
int intermediate_threadIdx = (blockIdx.x % mul) * blockDim.x + threadIdx.x ;
// which merge? find offset of M corresponding to A and B
int offset = k*2*d + (intermediate_threadIdx /((int) pow(2, (i+1)))) * pow(2, i+1);
int idx_start_a = offset + (i%2)*d;
int idx_start_b = idx_start_a + size;
int m = intermediate_threadIdx % ((int) pow(2, (i+1)));
// device function
trifusion(M+ idx_start_a, M+idx_start_b, M+offset + (!(i%2))*d, size, size, m);
}
void batch_sort(int d, int batch_dim, int max_threads_per_block,bool shared){
// store on GPU a vector M of size 2 * batch_dim * d
// copy each vector j to A[j][0 .. d-1] (setting A[j][d .. 2d-1] to 0)
// A[batch_id][0 .. d-1] keeps the old values and A[batch_id][d .. 2d-1] the new ones, or vice versa, alternating via the i%2 trick
int * mCPU = rand_int_array(2*d*batch_dim);
int * mSOL = new int[2*d*batch_dim];
// print result
/*
f(i, 2*d*batch_dim){
std::cout << mCPU[i] << "\t";
}
std::cout<< std::endl;*/
int * mGPU;
testCUDA(cudaMalloc(&mGPU,2*d*batch_dim*sizeof(int)));
testCUDA(cudaMemcpy(mGPU,mCPU, 2*d*batch_dim*sizeof(int), cudaMemcpyHostToDevice));
// inplace sort. mCPU will be used in the future to compare sol from GPU
cpu_batch_sort(mCPU, d , batch_dim);
int mul = (d>max_threads_per_block)? (d / max_threads_per_block) : 1;
// timer block
float TimeVar;
cudaEvent_t start, stop;
if (shared){
testCUDA(cudaEventCreate(&start));
testCUDA(cudaEventCreate(&stop));
testCUDA(cudaEventRecord(start,0));
// timer block
// execution block
f(i, ((int) (log(d)/ log(2)))){
// for each vector to sort there are 2**(log2(d) - i - 1) merges to do; each merge takes 2**(i+1) threads => always d threads in total
//kernel_batch_sort<<< batch_dim*mul, (d > max_threads_per_block)? max_threads_per_block: d >>> (mGPU, i,mul, d);
kernel_batch_sort_shared<<< 1, d ,2*d*sizeof(int)>>> (mGPU, i, d);
}
// execution block
//timer block
testCUDA(cudaEventRecord(stop,0));
testCUDA(cudaEventSynchronize(stop));
testCUDA(cudaEventElapsedTime(&TimeVar, start, stop));
} else{
testCUDA(cudaEventCreate(&start));
testCUDA(cudaEventCreate(&stop));
testCUDA(cudaEventRecord(start,0));
// timer block
// execution block
f(i, ((int) (log(d)/ log(2)))){
// for each vector to sort there are 2**(log2(d) - i - 1) merges to do; each merge takes 2**(i+1) threads => always d threads in total
kernel_batch_sort<<< batch_dim*mul, (d > max_threads_per_block)? max_threads_per_block: d >>> (mGPU, i,mul, d);
//kernel_batch_sort_shared<<< batch_dim*mul, (d > max_threads_per_block)? max_threads_per_block: d ,2*d*sizeof(int)>>> (mGPU, i, d);
}
// execution block
//timer block
testCUDA(cudaEventRecord(stop,0));
testCUDA(cudaEventSynchronize(stop));
testCUDA(cudaEventElapsedTime(&TimeVar, start, stop));
}
// timer block
testCUDA(cudaMemcpy(mSOL, mGPU, 2*d*batch_dim*sizeof(int), cudaMemcpyDeviceToHost));
// print result
/*
f(i, 2*d*batch_dim){
std::cout << mSOL[i] << "\t";
}
std::cout<< std::endl;*/
if(check_solution_batch(mCPU, mSOL, d, batch_dim)) std::cout << "Parallel solution OK" << std::endl;
else std::cout << "Parallel solution Wrong" << std::endl;
std::cout << "Elapsed GPU time: " << TimeVar << " ms" << std::endl << std::endl;
// memory free
testCUDA(cudaFree(mGPU));
delete [] mCPU;
delete [] mSOL;
}
int main(int argc, char * argv[]){
// cin and cout as fast as printf
std::ios_base::sync_with_stdio(false);
// function to test merge algorithm. Tested
//trifusion_test();
int d = 4;
int batch_dim = 1;
bool shared = false;
if(argc==3){
d = std::stoi(argv[1]);
batch_dim= std::stoi(argv[2]);
}
if (d <= 1024 && batch_dim == 1){
char c;
printf("Use shared memory? (y/n): ");
scanf("%c", &c);
if (c=='y'){
shared = true;
}
}
if(isPowerOfTwo(d)){
// check the number of SM and the parameters given
cudaDeviceProp prop;
testCUDA(cudaGetDeviceProperties(&prop,0));
std::cout << "GPU informations " << std::endl;
std::cout << "-----------------" << std::endl;
std::cout << "Max threads per block: " << prop.maxThreadsPerBlock << std::endl;
std::cout << "SM count: " << prop.multiProcessorCount << std::endl << std::endl;
int mul = (d>prop.maxThreadsPerBlock)? (d / prop.maxThreadsPerBlock) : 1;
if(mul*batch_dim > prop.multiProcessorCount){
std::cout << "WARNING: number of blocks greater than GPU SM count" << std::endl << std::endl;
}
batch_sort(d,batch_dim, prop.maxThreadsPerBlock,shared);
}else{
std::cout << "ABORTED: d is not power of 2" << std::endl;
}
return 0;
}
|
9fd322644b577cc7470d03786e8874a434497752.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
constexpr index_t a_len0 = 256;
constexpr index_t b_len0_even = 16;
constexpr index_t b_len0_odd = 15;
constexpr index_t c_len0_full_even = a_len0 + b_len0_even - 1;
constexpr index_t c_len0_full_odd = a_len0 + b_len0_odd - 1;
constexpr index_t c_len0_valid_even = a_len0 - b_len0_even + 1;
constexpr index_t c_len0_valid_odd = a_len0 - b_len0_odd + 1;
constexpr index_t c_len0_same = a_len0;
constexpr index_t a_len1 = 128;
constexpr index_t b_len1_even = 8;
constexpr index_t b_len1_odd = 7;
constexpr index_t c_len1_full_even = a_len1 + b_len1_even - 1;
constexpr index_t c_len1_full_odd = a_len1 + b_len1_odd - 1;
constexpr index_t c_len1_valid_even = a_len1 - b_len1_even + 1;
constexpr index_t c_len1_valid_odd = a_len1 - b_len1_odd + 1;
constexpr index_t c_len1_same = a_len1;
constexpr index_t a_len = 8 * 122880 + 2 * 32768;
constexpr index_t b_len = 209;
constexpr index_t c_len = a_len + b_len - 1;
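// Added note: for a_len0 = 256 and b_len0_even = 16, the definitions above give
// full = 256 + 16 - 1 = 271, same = 256 and valid = 256 - 16 + 1 = 241 output samples,
// matching the MATX_C_MODE_FULL / SAME / VALID modes exercised by the tests below.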
template <typename T>
class CorrelationConvolutionTest : public ::testing::Test {
protected:
void SetUp() override
{
CheckTestTypeSupport<T>();
pb = std::make_unique<detail::MatXPybind>();
// Half precision needs a bit more tolerance when compared to
// fp32
if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) {
thresh = 0.2f;
}
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 1> av{{a_len0}};
tensor_t<T, 1> bv_even{{b_len0_even}};
tensor_t<T, 1> bv_odd{{b_len0_odd}};
tensor_t<T, 1> cv_full_even{{c_len0_full_even}};
tensor_t<T, 1> cv_full_odd{{c_len0_full_odd}};
tensor_t<T, 1> cv_valid_even{{c_len0_valid_even}};
tensor_t<T, 1> cv_valid_odd{{c_len0_valid_odd}};
tensor_t<T, 1> cv_same{{c_len0_same}};
float thresh = 0.01f;
};
template <typename T>
class CorrelationConvolution2DTest : public ::testing::Test {
protected:
void SetUp() override
{
CheckTestTypeSupport<T>();
pb = std::make_unique<detail::MatXPybind>();
// Half precision needs a bit more tolerance when compared to
// fp32
if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) {
thresh = .2f;
}
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 2> av{{a_len0,a_len1}};
tensor_t<T, 2> bv_even{{b_len0_even,b_len1_even}};
tensor_t<T, 2> bv_odd{{b_len0_odd,b_len1_odd}};
tensor_t<T, 2> cv_full_even{{c_len0_full_even,c_len1_full_even}};
tensor_t<T, 2> cv_full_odd{{c_len0_full_odd,c_len1_full_odd}};
tensor_t<T, 2> cv_valid_even{{c_len0_valid_even,c_len1_valid_even}};
tensor_t<T, 2> cv_valid_odd{{c_len0_valid_odd,c_len1_valid_odd}};
tensor_t<T, 2> cv_same{{c_len0_same,c_len1_same}};
float thresh = 0.01f;
};
template <typename T>
class CorrelationConvolutionLargeTest : public ::testing::Test {
protected:
void SetUp() override
{
CheckTestTypeSupport<T>();
pb = std::make_unique<detail::MatXPybind>();
// Half precision needs a bit more tolerance when compared to
// fp32
if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) {
thresh = 0.2f;
}
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 1> av{{a_len}};
tensor_t<T, 1> bv{{b_len}};
tensor_t<T, 1> cv{{c_len}};
float thresh = 0.01f;
};
template <typename TensorType>
class CorrelationConvolutionTestFloatTypes
: public CorrelationConvolutionTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolutionTestNonHalfFloatTypes
: public CorrelationConvolutionTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolutionLargeTestFloatTypes
: public CorrelationConvolutionLargeTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolution2DTestFloatTypes
: public CorrelationConvolution2DTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolutionComplexTypes
: public CorrelationConvolutionTest<TensorType> {
};
TYPED_TEST_SUITE(CorrelationConvolutionTestFloatTypes, MatXFloatTypes);
TYPED_TEST_SUITE(CorrelationConvolutionTestNonHalfFloatTypes, MatXFloatNonHalfTypes);
TYPED_TEST_SUITE(CorrelationConvolutionLargeTestFloatTypes, MatXFloatNonHalfTypes);
TYPED_TEST_SUITE(CorrelationConvolution2DTestFloatTypes, MatXFloatNonHalfTypes);
// Real/real direct 1D convolution Large
TYPED_TEST(CorrelationConvolutionLargeTestFloatTypes, Direct1DConvolutionLarge)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len, b_len});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv, "b_op");
// example-begin conv1d-test-1
// 1D convolution in FULL mode where every output is stored
(this->cv = conv1d(this->av, this->bv, MATX_C_MODE_FULL)).run();
// example-end conv1d-test-1
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionLargeTestFloatTypes, FFT1DConvolutionLarge)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len, b_len});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv, "b_op");
// 1D convolution in FULL mode where every output is stored
(this->cv = conv1d(this->av, this->bv, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
// Real/real direct 1D convolution
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionFullEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->av, this->bv_even, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionFullEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->av, this->bv_even, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
// Real/real direct 2D convolution
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionFullEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv2d(this->av, this->bv_even, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, Direct1DConvolutionSameEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_same = conv1d(this->av, this->bv_even, MATX_C_MODE_SAME)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionSameEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_same = conv1d(this->av, this->bv_even, MATX_C_MODE_SAME, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionSameEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
// example-begin conv2d-test-1
(this->cv_same = conv2d(this->av, this->bv_even, MATX_C_MODE_SAME)).run();
// example-end conv2d-test-1
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionValidEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_valid_even = conv1d(this->av, this->bv_even, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_even, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionValidEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_valid_even = conv1d(this->av, this->bv_even, MATX_C_MODE_VALID, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_even, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionValidEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_valid_even = conv2d(this->av, this->bv_even, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_even, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionFullOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_full_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_odd, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionFullOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_full_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_odd, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionFullOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_odd, b_len1_odd});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_full_odd = conv2d(this->av, this->bv_odd, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_odd, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionSameOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_same = conv1d(this->av, this->bv_odd, MATX_C_MODE_SAME)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionSameOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_same = conv1d(this->av, this->bv_odd, MATX_C_MODE_SAME, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionSameOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_odd, b_len1_odd});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_same = conv2d(this->av, this->bv_odd, MATX_C_MODE_SAME)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionValidOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_valid_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_odd, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionValidOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_valid_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_VALID, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_odd, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionValidOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_odd, b_len1_odd});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_valid_odd = conv2d(this->av, this->bv_odd, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_odd, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->bv_even, this->av, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->bv_even, this->av, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv2d(this->bv_even, this->av, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DCorrelation)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("corr");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
// example-begin corr-test-1
// Full correlation mode with direct correlation
(this->cv_full_even = corr(this->av, this->bv_even, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT)).run();
// example-end corr-test-1
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "corr", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DCorrelation)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("corr");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
// Full correlation mode with direct correlation
(this->cv_full_even = corr(this->av, this->bv_even, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "corr", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DCorrelationSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("corr_swap");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = corr(this->bv_even, this->av, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "corr_swap", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Conv1Axis)
{
MATX_ENTER_HANDLER();
const int d1 = 8;
const int d2 = 512;
const int d3 = 1024;
auto in1 = make_tensor<TypeParam>({d1, d2, d3});
auto in2 = make_tensor<TypeParam>({d1, d2, d3});
auto out1 = make_tensor<TypeParam>({d1, d2, d3});
auto out2 = make_tensor<TypeParam>({d1, d2, d3});
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
in1(i,j,k) = static_cast<TypeParam>((float)(i+j+k));
in2(i,j,k) = static_cast<TypeParam>((float)(1));
}
}
}
(out1 = conv1d(in1, in2, MATX_C_MODE_SAME)).run();
// example-begin conv1d-test-2
(out2 = conv1d(in1, in2, {2}, MATX_C_MODE_SAME)).run();
// example-end conv1d-test-2
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({0,2,1}) = conv1d(in1.Permute({0,2,1}), in2.Permute({0,2,1}), MATX_C_MODE_SAME)).run();
// example-begin conv1d-test-3
(out2 = conv1d(in1, in2, {1}, MATX_C_MODE_SAME)).run();
// example-end conv1d-test-3
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({1,2,0}) = conv1d(in1.Permute({1,2,0}), in2.Permute({1,2,0}), MATX_C_MODE_SAME)).run();
(out2 = conv1d(in1, in2, {0}, MATX_C_MODE_SAME)).run();
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1 = corr(in1, in2, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
(out2 = corr(in1, in2, {2}, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({0,2,1}) = corr(in1.Permute({0,2,1}), in2.Permute({0,2,1}), MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
(out2 = corr(in1, in2, {1}, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({1,2,0}) = corr(in1.Permute({1,2,0}), in2.Permute({1,2,0}), MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
(out2 = corr(in1, in2, {0}, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Conv2Axis)
{
MATX_ENTER_HANDLER();
#if 1 // currently doesn't work because Conv2D requires rank2 filter.
const int d1 = 8;
const int d2 = 512;
const int d3 = 1024;
auto in1 = make_tensor<TypeParam>({d1, d2, d3});
auto in2 = make_tensor<TypeParam>({d1, d2, d3});
auto out1 = make_tensor<TypeParam>({d1, d2, d3});
auto out2 = make_tensor<TypeParam>({d1, d2, d3});
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
in1(i,j,k) = static_cast<TypeParam>((float)(i+j+k));
in2(i,j,k) = static_cast<TypeParam>((float)(1));
}
}
}
(out1 = conv2d(in1, in2, MATX_C_MODE_SAME)).run();
(out2 = conv2d(in1, in2, {1, 2}, MATX_C_MODE_SAME)).run();
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({0,2,1}) = conv2d(in1.Permute({0,2,1}), in2.Permute({0,2,1}), MATX_C_MODE_SAME)).run();
(out2 = conv2d(in1, in2, {2, 1}, MATX_C_MODE_SAME)).run();
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({1,2,0}) = conv2d(in1.Permute({1,2,0}), in2.Permute({1,2,0}), MATX_C_MODE_SAME)).run();
(out2 = conv2d(in1, in2, {2, 0}, MATX_C_MODE_SAME)).run();
hipStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
#endif
MATX_EXIT_HANDLER();
}
// // Complex/complex direct 1D convolution
// TEST_F(CorrelationConvolutionTest, Direct1DC2CConvolution)
// {
// MATX_ENTER_HANDLER();
// conv1d(ccv, acv, bcv, MATX_C_MODE_FULL, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_conv", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Real/real direct 1D convolution with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DR2RConvolutionSwap)
// {
// MATX_ENTER_HANDLER();
// conv1d(crv, brv, arv, MATX_C_MODE_FULL, 0);
// MATX_TEST_ASSERT_COMPARE(pb, crv, "c_op_real_conv", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Complex/complex direct 1D convolution with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DC2CConvolutionSwap)
// {
// MATX_ENTER_HANDLER();
// conv1d(ccv, bcv, acv, MATX_C_MODE_FULL, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_conv", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Real/real direct 1D correlation
// TEST_F(CorrelationConvolutionTest, Direct1DR2RCorrelation)
// {
// MATX_ENTER_HANDLER();
// corr(crv, arv, brv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, crv, "c_op_real_corr", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Complex/complex direct 1D correlation
// TEST_F(CorrelationConvolutionTest, Direct1DC2CCorrelation)
// {
// MATX_ENTER_HANDLER();
// corr(ccv, acv, bcv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_corr", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Real/real direct 1D correlation with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DR2RCorrelationSwap)
// {
// MATX_ENTER_HANDLER();
// corr(crv, brv, arv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, crv, "c_op_real_corr_swap", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Complex/complex direct 1D correlation with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DC2CCorrelationSwap)
// {
// MATX_ENTER_HANDLER();
// corr(ccv, bcv, acv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_corr_swap", 0.01);
// MATX_EXIT_HANDLER();
// }
| 9fd322644b577cc7470d03786e8874a434497752.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
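// Expected output lengths for each convolution mode, per the expressions below:
// FULL = N + K - 1, VALID = N - K + 1, SAME = N (input length N, filter length K).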
constexpr index_t a_len0 = 256;
constexpr index_t b_len0_even = 16;
constexpr index_t b_len0_odd = 15;
constexpr index_t c_len0_full_even = a_len0 + b_len0_even - 1;
constexpr index_t c_len0_full_odd = a_len0 + b_len0_odd - 1;
constexpr index_t c_len0_valid_even = a_len0 - b_len0_even + 1;
constexpr index_t c_len0_valid_odd = a_len0 - b_len0_odd + 1;
constexpr index_t c_len0_same = a_len0;
constexpr index_t a_len1 = 128;
constexpr index_t b_len1_even = 8;
constexpr index_t b_len1_odd = 7;
constexpr index_t c_len1_full_even = a_len1 + b_len1_even - 1;
constexpr index_t c_len1_full_odd = a_len1 + b_len1_odd - 1;
constexpr index_t c_len1_valid_even = a_len1 - b_len1_even + 1;
constexpr index_t c_len1_valid_odd = a_len1 - b_len1_odd + 1;
constexpr index_t c_len1_same = a_len1;
constexpr index_t a_len = 8 * 122880 + 2 * 32768;
constexpr index_t b_len = 209;
constexpr index_t c_len = a_len + b_len - 1;
template <typename T>
class CorrelationConvolutionTest : public ::testing::Test {
protected:
void SetUp() override
{
CheckTestTypeSupport<T>();
pb = std::make_unique<detail::MatXPybind>();
// Half precision needs a bit more tolerance when compared to
// fp32
if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) {
thresh = 0.2f;
}
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 1> av{{a_len0}};
tensor_t<T, 1> bv_even{{b_len0_even}};
tensor_t<T, 1> bv_odd{{b_len0_odd}};
tensor_t<T, 1> cv_full_even{{c_len0_full_even}};
tensor_t<T, 1> cv_full_odd{{c_len0_full_odd}};
tensor_t<T, 1> cv_valid_even{{c_len0_valid_even}};
tensor_t<T, 1> cv_valid_odd{{c_len0_valid_odd}};
tensor_t<T, 1> cv_same{{c_len0_same}};
float thresh = 0.01f;
};
template <typename T>
class CorrelationConvolution2DTest : public ::testing::Test {
protected:
void SetUp() override
{
CheckTestTypeSupport<T>();
pb = std::make_unique<detail::MatXPybind>();
// Half precision needs a bit more tolerance when compared to
// fp32
if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) {
thresh = .2f;
}
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 2> av{{a_len0,a_len1}};
tensor_t<T, 2> bv_even{{b_len0_even,b_len1_even}};
tensor_t<T, 2> bv_odd{{b_len0_odd,b_len1_odd}};
tensor_t<T, 2> cv_full_even{{c_len0_full_even,c_len1_full_even}};
tensor_t<T, 2> cv_full_odd{{c_len0_full_odd,c_len1_full_odd}};
tensor_t<T, 2> cv_valid_even{{c_len0_valid_even,c_len1_valid_even}};
tensor_t<T, 2> cv_valid_odd{{c_len0_valid_odd,c_len1_valid_odd}};
tensor_t<T, 2> cv_same{{c_len0_same,c_len1_same}};
float thresh = 0.01f;
};
template <typename T>
class CorrelationConvolutionLargeTest : public ::testing::Test {
protected:
void SetUp() override
{
CheckTestTypeSupport<T>();
pb = std::make_unique<detail::MatXPybind>();
// Half precision needs a bit more tolerance when compared to
// fp32
if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) {
thresh = 0.2f;
}
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 1> av{{a_len}};
tensor_t<T, 1> bv{{b_len}};
tensor_t<T, 1> cv{{c_len}};
float thresh = 0.01f;
};
template <typename TensorType>
class CorrelationConvolutionTestFloatTypes
: public CorrelationConvolutionTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolutionTestNonHalfFloatTypes
: public CorrelationConvolutionTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolutionLargeTestFloatTypes
: public CorrelationConvolutionLargeTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolution2DTestFloatTypes
: public CorrelationConvolution2DTest<TensorType> {
};
template <typename TensorType>
class CorrelationConvolutionComplexTypes
: public CorrelationConvolutionTest<TensorType> {
};
TYPED_TEST_SUITE(CorrelationConvolutionTestFloatTypes, MatXFloatTypes);
TYPED_TEST_SUITE(CorrelationConvolutionTestNonHalfFloatTypes, MatXFloatNonHalfTypes);
TYPED_TEST_SUITE(CorrelationConvolutionLargeTestFloatTypes, MatXFloatNonHalfTypes);
TYPED_TEST_SUITE(CorrelationConvolution2DTestFloatTypes, MatXFloatNonHalfTypes);
// Real/real direct 1D convolution Large
TYPED_TEST(CorrelationConvolutionLargeTestFloatTypes, Direct1DConvolutionLarge)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len, b_len});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv, "b_op");
// example-begin conv1d-test-1
// 1D convolution in FULL mode where every output is stored
(this->cv = conv1d(this->av, this->bv, MATX_C_MODE_FULL)).run();
// example-end conv1d-test-1
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionLargeTestFloatTypes, FFT1DConvolutionLarge)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len, b_len});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv, "b_op");
// 1D convolution in FULL mode where every output is stored
(this->cv = conv1d(this->av, this->bv, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
// Real/real direct 1D convolution
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionFullEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->av, this->bv_even, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionFullEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->av, this->bv_even, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
// Real/real direct 2D convolution
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionFullEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv2d(this->av, this->bv_even, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, Direct1DConvolutionSameEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_same = conv1d(this->av, this->bv_even, MATX_C_MODE_SAME)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionSameEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_same = conv1d(this->av, this->bv_even, MATX_C_MODE_SAME, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionSameEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
// example-begin conv2d-test-1
(this->cv_same = conv2d(this->av, this->bv_even, MATX_C_MODE_SAME)).run();
// example-end conv2d-test-1
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionValidEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_valid_even = conv1d(this->av, this->bv_even, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_even, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionValidEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_valid_even = conv1d(this->av, this->bv_even, MATX_C_MODE_VALID, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_even, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionValidEven)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_valid_even = conv2d(this->av, this->bv_even, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_even, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionFullOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_full_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_odd, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionFullOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_full_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_odd, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionFullOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_odd, b_len1_odd});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_full_odd = conv2d(this->av, this->bv_odd, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_odd, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionSameOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_same = conv1d(this->av, this->bv_odd, MATX_C_MODE_SAME)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionSameOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_same = conv1d(this->av, this->bv_odd, MATX_C_MODE_SAME, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionSameOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_odd, b_len1_odd});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_same = conv2d(this->av, this->bv_odd, MATX_C_MODE_SAME)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_same, "conv_same", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionValidOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_valid_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_odd, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionValidOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_odd});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_valid_odd = conv1d(this->av, this->bv_odd, MATX_C_MODE_VALID, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_odd, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionValidOdd)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_odd, b_len1_odd});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_odd, "b_op");
(this->cv_valid_odd = conv2d(this->av, this->bv_odd, MATX_C_MODE_VALID)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_valid_odd, "conv_valid", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DConvolutionSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->bv_even, this->av, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DConvolutionSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("conv");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv1d(this->bv_even, this->av, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolution2DTestFloatTypes, Direct2DConvolutionSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv2d_operators", {a_len0, a_len1, b_len0_even, b_len1_even});
this->pb->RunTVGenerator("conv2d");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = conv2d(this->bv_even, this->av, MATX_C_MODE_FULL)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "conv_full", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DCorrelation)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("corr");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
// example-begin corr-test-1
// Full correlation mode with direct correlation
(this->cv_full_even = corr(this->av, this->bv_even, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT)).run();
// example-end corr-test-1
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "corr", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestNonHalfFloatTypes, FFT1DCorrelation)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("corr");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
// Full correlation mode with direct correlation
(this->cv_full_even = corr(this->av, this->bv_even, MATX_C_MODE_FULL, MATX_C_METHOD_FFT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "corr", this->thresh);
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Direct1DCorrelationSwap)
{
MATX_ENTER_HANDLER();
this->pb->template InitTVGenerator<TypeParam>("00_transforms", "conv_operators", {a_len0, b_len0_even});
this->pb->RunTVGenerator("corr_swap");
this->pb->NumpyToTensorView(this->av, "a_op");
this->pb->NumpyToTensorView(this->bv_even, "b_op");
(this->cv_full_even = corr(this->bv_even, this->av, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT)).run();
MATX_TEST_ASSERT_COMPARE(this->pb, this->cv_full_even, "corr_swap", this->thresh);
MATX_EXIT_HANDLER();
}
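// The axis-overload tests below verify that conv1d/corr with an explicit axis list produces
// the same result as running the operation on permuted views of the tensors.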
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Conv1Axis)
{
MATX_ENTER_HANDLER();
const int d1 = 8;
const int d2 = 512;
const int d3 = 1024;
auto in1 = make_tensor<TypeParam>({d1, d2, d3});
auto in2 = make_tensor<TypeParam>({d1, d2, d3});
auto out1 = make_tensor<TypeParam>({d1, d2, d3});
auto out2 = make_tensor<TypeParam>({d1, d2, d3});
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
in1(i,j,k) = static_cast<TypeParam>((float)(i+j+k));
in2(i,j,k) = static_cast<TypeParam>((float)(1));
}
}
}
(out1 = conv1d(in1, in2, MATX_C_MODE_SAME)).run();
// example-begin conv1d-test-2
(out2 = conv1d(in1, in2, {2}, MATX_C_MODE_SAME)).run();
// example-end conv1d-test-2
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({0,2,1}) = conv1d(in1.Permute({0,2,1}), in2.Permute({0,2,1}), MATX_C_MODE_SAME)).run();
// example-begin conv1d-test-3
(out2 = conv1d(in1, in2, {1}, MATX_C_MODE_SAME)).run();
// example-end conv1d-test-3
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({1,2,0}) = conv1d(in1.Permute({1,2,0}), in2.Permute({1,2,0}), MATX_C_MODE_SAME)).run();
(out2 = conv1d(in1, in2, {0}, MATX_C_MODE_SAME)).run();
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1 = corr(in1, in2, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
(out2 = corr(in1, in2, {2}, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({0,2,1}) = corr(in1.Permute({0,2,1}), in2.Permute({0,2,1}), MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
(out2 = corr(in1, in2, {1}, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({1,2,0}) = corr(in1.Permute({1,2,0}), in2.Permute({1,2,0}), MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
(out2 = corr(in1, in2, {0}, MATX_C_MODE_SAME, MATX_C_METHOD_DIRECT)).run();
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(CorrelationConvolutionTestFloatTypes, Conv2Axis)
{
MATX_ENTER_HANDLER();
#if 1 // currently doesn't work because Conv2D requires rank2 filter.
const int d1 = 8;
const int d2 = 512;
const int d3 = 1024;
auto in1 = make_tensor<TypeParam>({d1, d2, d3});
auto in2 = make_tensor<TypeParam>({d1, d2, d3});
auto out1 = make_tensor<TypeParam>({d1, d2, d3});
auto out2 = make_tensor<TypeParam>({d1, d2, d3});
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
in1(i,j,k) = static_cast<TypeParam>((float)(i+j+k));
in2(i,j,k) = static_cast<TypeParam>((float)(1));
}
}
}
(out1 = conv2d(in1, in2, MATX_C_MODE_SAME)).run();
(out2 = conv2d(in1, in2, {1, 2}, MATX_C_MODE_SAME)).run();
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({0,2,1}) = conv2d(in1.Permute({0,2,1}), in2.Permute({0,2,1}), MATX_C_MODE_SAME)).run();
(out2 = conv2d(in1, in2, {2, 1}, MATX_C_MODE_SAME)).run();
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
(out1.Permute({1,2,0}) = conv2d(in1.Permute({1,2,0}), in2.Permute({1,2,0}), MATX_C_MODE_SAME)).run();
(out2 = conv2d(in1, in2, {2, 0}, MATX_C_MODE_SAME)).run();
cudaStreamSynchronize(0);
for(int i = 0; i < d1; i++) {
for(int j = 0; j < d2; j++) {
for(int k = 0; k < d3; k++) {
ASSERT_EQ(out1(i,j,k), out2(i,j,k));
}
}
}
#endif
MATX_EXIT_HANDLER();
}
// // Complex/complex direct 1D convolution
// TEST_F(CorrelationConvolutionTest, Direct1DC2CConvolution)
// {
// MATX_ENTER_HANDLER();
// conv1d(ccv, acv, bcv, MATX_C_MODE_FULL, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_conv", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Real/real direct 1D convolution with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DR2RConvolutionSwap)
// {
// MATX_ENTER_HANDLER();
// conv1d(crv, brv, arv, MATX_C_MODE_FULL, 0);
// MATX_TEST_ASSERT_COMPARE(pb, crv, "c_op_real_conv", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Complex/complex direct 1D convolution with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DC2CConvolutionSwap)
// {
// MATX_ENTER_HANDLER();
// conv1d(ccv, bcv, acv, MATX_C_MODE_FULL, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_conv", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Real/real direct 1D correlation
// TEST_F(CorrelationConvolutionTest, Direct1DR2RCorrelation)
// {
// MATX_ENTER_HANDLER();
// corr(crv, arv, brv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, crv, "c_op_real_corr", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Complex/complex direct 1D correlation
// TEST_F(CorrelationConvolutionTest, Direct1DC2CCorrelation)
// {
// MATX_ENTER_HANDLER();
// corr(ccv, acv, bcv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_corr", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Real/real direct 1D correlation with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DR2RCorrelationSwap)
// {
// MATX_ENTER_HANDLER();
// corr(crv, brv, arv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, crv, "c_op_real_corr_swap", 0.01);
// MATX_EXIT_HANDLER();
// }
// // Complex/complex direct 1D correlation with swapped parameters
// TEST_F(CorrelationConvolutionTest, Direct1DC2CCorrelationSwap)
// {
// MATX_ENTER_HANDLER();
// corr(ccv, bcv, acv, MATX_C_MODE_FULL, MATX_C_METHOD_DIRECT, 0);
// MATX_TEST_ASSERT_COMPARE(pb, ccv, "c_op_complex_corr_swap", 0.01);
// MATX_EXIT_HANDLER();
// }
|
1460b33db81554677d3ea97032f49e6dea6feddc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header_hip.cuh"
#include "gpu_memory.cuh"
__device__ void interpolate_gamma( cvklu_data *rate_data, double T, double *gamma, double *dgamma_dT )
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
log_temp_out = log(T);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
*gamma = rate_data->g_gammaH2_1[bin_id] + Tdef * (rate_data->g_gammaH2_1[bin_id+1] - rate_data->g_gammaH2_1[bin_id]);
*dgamma_dT = rate_data->g_dgammaH2_1_dT[bin_id] + Tdef * (rate_data->g_dgammaH2_1_dT[bin_id+1] - rate_data->g_dgammaH2_1_dT[bin_id]);
}
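// Newton iteration for the gas temperature from the specific internal energy y[9] (ge),
// using the temperature-dependent H2 gamma; also returns dT/dge via dTs_ge.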
__device__ void evaluate_temperature( double* T, double* dTs_ge, const double *y, const double mdensity, cvklu_data *rate_data )
{
// iterate temperature to convergence
double t, tnew, tdiff;
double dge, dge_dT;
double gammaH2, dgammaH2_dT, _gammaH2_m1;
int count = 0;
int MAX_ITERATION = 100;
double gamma = 5./3.;
double _gamma_m1 = 1.0 / (gamma - 1.0);
double kb = 1.3806504e-16; // Boltzmann constant [erg/K]
// prepare t, tnew for the Newton iteration
t = *T;
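// t != t is a NaN check: restart from a fixed initial guess if the incoming temperature is undefined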
if (t != t) t = 1000.0;
tnew = 1.1*t;
tdiff = tnew - t;
while ( tdiff/ tnew > 0.001 ){
// We do Newton's Iteration to calculate the temperature
// Since gammaH2 is dependent on the temperature too!
interpolate_gamma( rate_data, t, &gammaH2, &dgammaH2_dT );
_gammaH2_m1 = 1.0 / (gammaH2 - 1.0);
dge_dT = t*kb*(-y[INDEX(0)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT - y[INDEX(1)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT)/(mdensity)
+ kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1 + y[INDEX(4)]*_gamma_m1
+ y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity);
dge = t*kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1
+ y[INDEX(4)]*_gamma_m1 + y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity) - y[INDEX(9)];
//This is the change in ge for each iteration
tnew = t - dge/dge_dT;
count += 1;
tdiff = fabs(t - tnew);
t = tnew;
if (count > MAX_ITERATION){
printf("T[tid = %d] failed to converge (iteration: %d); at T = %0.3g \n", T_ID, count, tnew );
}
if ( t!= t && T_ID == 0){
printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", T_ID, t, count, y[INDEX(9)], gammaH2);
t = 1000.0;
for (int i = 0; i < 10; i++){
printf("y[INDEX(%d)] = %0.5g \n", i, y[INDEX(i)]);
}
break;
}
}
// update the temperature;
*T = t;
*dTs_ge = 1.0 / dge_dT;
// printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", tid, t, count, y[INDEX(9)], gammaH2);
}
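// Interpolate the tabulated reaction rate coefficients (linear in log T) onto the current
// temperature; entries k20 and k23 are commented out and unused in this network.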
__device__ void interpolate_reaction_rates( double *reaction_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, invTs, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
invTs = 1.0 / temp_out;
// rate_out is a long 1D array
// NRATE is the number of rate required by the solver network
reaction_rates_out[INDEX( 0)] = rate_data->r_k01[bin_id] + Tdef * (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]);
reaction_rates_out[INDEX( 1)] = rate_data->r_k02[bin_id] + Tdef * (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]);
reaction_rates_out[INDEX( 2)] = rate_data->r_k03[bin_id] + Tdef * (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]);
reaction_rates_out[INDEX( 3)] = rate_data->r_k04[bin_id] + Tdef * (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]);
reaction_rates_out[INDEX( 4)] = rate_data->r_k05[bin_id] + Tdef * (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]);
reaction_rates_out[INDEX( 5)] = rate_data->r_k06[bin_id] + Tdef * (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]);
reaction_rates_out[INDEX( 6)] = rate_data->r_k07[bin_id] + Tdef * (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]);
reaction_rates_out[INDEX( 7)] = rate_data->r_k08[bin_id] + Tdef * (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]);
reaction_rates_out[INDEX( 8)] = rate_data->r_k09[bin_id] + Tdef * (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]);
reaction_rates_out[INDEX( 9)] = rate_data->r_k10[bin_id] + Tdef * (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]);
reaction_rates_out[INDEX(10)] = rate_data->r_k11[bin_id] + Tdef * (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]);
reaction_rates_out[INDEX(11)] = rate_data->r_k12[bin_id] + Tdef * (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]);
reaction_rates_out[INDEX(12)] = rate_data->r_k13[bin_id] + Tdef * (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]);
reaction_rates_out[INDEX(13)] = rate_data->r_k14[bin_id] + Tdef * (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]);
reaction_rates_out[INDEX(14)] = rate_data->r_k15[bin_id] + Tdef * (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]);
reaction_rates_out[INDEX(15)] = rate_data->r_k16[bin_id] + Tdef * (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]);
reaction_rates_out[INDEX(16)] = rate_data->r_k17[bin_id] + Tdef * (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]);
reaction_rates_out[INDEX(17)] = rate_data->r_k18[bin_id] + Tdef * (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]);
reaction_rates_out[INDEX(18)] = rate_data->r_k19[bin_id] + Tdef * (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]);
//reaction_rates_out[INDEX(19)] = rate_data->r_k20[bin_id] + Tdef * (rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id]);
reaction_rates_out[INDEX(20)] = rate_data->r_k21[bin_id] + Tdef * (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]);
reaction_rates_out[INDEX(21)] = rate_data->r_k22[bin_id] + Tdef * (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]);
//reaction_rates_out[INDEX(22)] = rate_data->r_k23[bin_id] + Tdef * (rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id]);
}
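// Same log-T interpolation for the cooling/heating rate tables used by the energy equation;
// entry 26 (cie_optical_depth_approx) is fixed to 1.0 here.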
__device__ void interpolate_cooling_rates( double *cooling_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
/*
if (T_ID == 0){
printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out);
}
*/
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// rate_out is a long 1D array
// NRATE is the number of rate required by the solver network
cooling_rates_out[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id] + Tdef * (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]);
cooling_rates_out[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id] + Tdef * (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]);
cooling_rates_out[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id] + Tdef * (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]);
cooling_rates_out[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id] + Tdef * (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]);
cooling_rates_out[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id] + Tdef * (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]);
cooling_rates_out[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id] + Tdef * (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]);
cooling_rates_out[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id] + Tdef * (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]);
cooling_rates_out[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id] + Tdef * (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]);
cooling_rates_out[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id] + Tdef * (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]);
cooling_rates_out[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id] + Tdef * (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]);
cooling_rates_out[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id] + Tdef * (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]);
cooling_rates_out[INDEX(11)] = rate_data->c_brem_brem[bin_id] + Tdef * (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]);
cooling_rates_out[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]);
cooling_rates_out[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id] + Tdef * (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]);
cooling_rates_out[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]);
cooling_rates_out[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]);
cooling_rates_out[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id] + Tdef * (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]);
cooling_rates_out[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id] + Tdef * (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]);
cooling_rates_out[INDEX(18)] = rate_data->c_compton_comp_[bin_id] + Tdef * (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]);
cooling_rates_out[INDEX(19)] = rate_data->c_gammah_gammah[bin_id] + Tdef * (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]);
cooling_rates_out[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id] + Tdef * (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]);
cooling_rates_out[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id] + Tdef * (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]);
cooling_rates_out[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id] + Tdef * (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]);
cooling_rates_out[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id] + Tdef * (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]);
cooling_rates_out[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id] + Tdef * (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]);
cooling_rates_out[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
cooling_rates_out[INDEX(26)] = 1.0; //rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
}
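// Temperature derivatives of the cooling rates: the tables are uniform in log T, so
// d(rate)/dT is approximated by the bin slope times 1/((t2 - t1) * T), i.e. _dT_Ts below.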
__device__ void interpolate_dcrate_dT(double *dcr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// dT = t2 - t1;
// inv_Ts = temp_out;
double _dT_Ts = 1.0 / ( (t2 -t1)* temp_out );
//ceHI_ceHI: 0
dcr_dT[INDEX( 0)] = (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]) * _dT_Ts;
//ceHeI_ceHeI: 1
dcr_dT[INDEX( 1)] = (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]) * _dT_Ts;
//ceHeII_ceHeII: 2
dcr_dT[INDEX( 2)] = (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]) * _dT_Ts;
//ciHeIS_ciHeIS: 3
dcr_dT[INDEX( 3)] = (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]) * _dT_Ts;
//ciHI_ciHI: 4
dcr_dT[INDEX( 4)] = (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]) * _dT_Ts;
//ciHeI_ciHeI: 5
dcr_dT[INDEX( 5)] = (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]) * _dT_Ts;
//ciHeII_ciHeII: 6
dcr_dT[INDEX( 6)] = (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]) * _dT_Ts;
//reHII_reHII: 7
dcr_dT[INDEX( 7)] = (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]) * _dT_Ts;
//reHeII1_reHeII1: 8
dcr_dT[INDEX( 8)] = (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]) * _dT_Ts;
//reHeII2_reHeII2: 9
dcr_dT[INDEX( 9)] = (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]) * _dT_Ts;
//reHeIII_reHeIII: 10
dcr_dT[INDEX(10)] = (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]) * _dT_Ts;
//brem_brem: 11
dcr_dT[INDEX(11)] = (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]) * _dT_Ts;
//gloverabel08_gaHI: 12
dcr_dT[INDEX(12)] = (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]) * _dT_Ts;
//gloverabel08_gaH2: 13
dcr_dT[INDEX(13)] = (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]) * _dT_Ts;
//gloverabel08_gaHe: 14
dcr_dT[INDEX(14)] = (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]) * _dT_Ts;
//gloverabel08_gaHp: 15
dcr_dT[INDEX(15)] = (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]) * _dT_Ts;
//gloverabel08_gael: 16
dcr_dT[INDEX(16)] = (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]) * _dT_Ts;
//gloverabel08_h2lte: 17
dcr_dT[INDEX(17)] = (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]) * _dT_Ts;
//compton_comp_: 18
dcr_dT[INDEX(18)] = (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]) * _dT_Ts;
//gammah_gammah: 19
dcr_dT[INDEX(19)] = (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]) * _dT_Ts;
//h2formation_h2mheat: 20
dcr_dT[INDEX(20)] = (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]) * _dT_Ts;
//h2formation_h2mcool: 21
dcr_dT[INDEX(21)] = (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]) * _dT_Ts;
//h2formation_ncrn: 22
dcr_dT[INDEX(22)] = (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]) * _dT_Ts;
//h2formation_ncrd1: 23
dcr_dT[INDEX(23)] = (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]) * _dT_Ts;
//h2formation_ncrd2: 24
dcr_dT[INDEX(24)] = (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]) * _dT_Ts;
//cie_cooling_cieco: 25
dcr_dT[INDEX(25)] = (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]) * _dT_Ts;
//cie_optical_depth_approx: 26
dcr_dT[INDEX(26)] = 0.0;
}
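// Same finite-difference d/dT treatment for the reaction rate coefficients.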
__device__ void interpolate_drrate_dT(double *drr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
//dT = t2 - t1;
//inv_Ts = temp_out;
double _dT_Ts = 1.0 / ((t2 - t1) * temp_out);
//k01: 0
drr_dT[INDEX( 0)] = (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]) *_dT_Ts;
//k02: 1
drr_dT[INDEX( 1)] = (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]) *_dT_Ts;
//k03: 2
drr_dT[INDEX( 2)] = (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]) *_dT_Ts;
//k04: 3
drr_dT[INDEX( 3)] = (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]) *_dT_Ts;
//k05: 4
drr_dT[INDEX( 4)] = (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]) *_dT_Ts;
//k06: 5
drr_dT[INDEX( 5)] = (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]) *_dT_Ts;
//k07: 6
drr_dT[INDEX( 6)] = (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]) *_dT_Ts;
//k08: 7
drr_dT[INDEX( 7)] = (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]) *_dT_Ts;
//k09: 8
drr_dT[INDEX( 8)] = (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]) *_dT_Ts;
//k10: 9
drr_dT[INDEX( 9)] = (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]) *_dT_Ts;
//k11: 10
drr_dT[INDEX(10)] = (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]) *_dT_Ts;
//k12: 11
drr_dT[INDEX(11)] = (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]) *_dT_Ts;
//k13: 12
drr_dT[INDEX(12)] = (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]) *_dT_Ts;
//k14: 13
drr_dT[INDEX(13)] = (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]) *_dT_Ts;
//k15: 14
drr_dT[INDEX(14)] = (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]) *_dT_Ts;
//k16: 15
drr_dT[INDEX(15)] = (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]) *_dT_Ts;
//k17: 16
drr_dT[INDEX(16)] = (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]) *_dT_Ts;
//k18: 17
drr_dT[INDEX(17)] = (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]) *_dT_Ts;
//k19: 18
drr_dT[INDEX(18)] = (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]) *_dT_Ts;
//k20: 19
// drr_dT[INDEX(19)] = (rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id]) *_dT_Ts;
//k21: 20
drr_dT[INDEX(20)] = (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]) *_dT_Ts;
//k22: 21
drr_dT[INDEX(21)] = (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]) *_dT_Ts;
//k23: 22
// drr_dT[INDEX(22)] = (rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id]) *_dT_Ts;
}
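// Right-hand side dy/dt of the 10-variable network (H2_1, H2_2, H_1, H_2, H_m0, He_1, He_2,
// He_3, de, ge): the temperature is first recovered from ge, rates are re-interpolated at
// that temperature, and the species/energy derivatives are assembled below.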
__device__ void dydt (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ dy, const mechanism_memory * d_mem) {
extern __shared__ double y_scale[];
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// int NSPECIES = 10;
const int NRATE = 23;
const int NCOOL = 26;
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
// scale related piece
double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
// these should be retrieved from the d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
const double mdensity = d_mem->density[T_ID];
const double inv_mdensity = 1.0 / mdensity;
const double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
//const double * __restrict__ scale = d_mem->scale;
//const double * __restrict__ inv_scale = d_mem->inv_scale;
#pragma unroll
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)]*y_scale[S_INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g\n", i, y_in[INDEX(i)], i, scale[INDEX(i)] );
}
#else
#pragma unroll
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)];
}
#endif
evaluate_temperature ( &T_local, &Tge, y , mdensity, rate_data );
interpolate_reaction_rates( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
//# 0: H2_1
dy[INDEX(0)] = local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] - local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] + local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] + local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 1: H2_2
dy[INDEX(1)] = local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 2: H_1
dy[INDEX(2)] = -local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] - 2*local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] - 2*local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 3: H_2
dy[INDEX(3)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)];
//# 4: H_m0
dy[INDEX(4)] = local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] - local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 5: He_1
dy[INDEX(5)] = -local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] + local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)];
//# 6: He_2
dy[INDEX(6)] = local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 7: He_3
dy[INDEX(7)] = local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 8: de
dy[INDEX(8)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] + local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)];
//# 9: ge
dy[INDEX(9)] = -2.01588*y[INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y[INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y[INDEX(0)]*local_cooling_rates[INDEX(13)] + y[INDEX(2)]*local_cooling_rates[INDEX(12)] + y[INDEX(3)]*local_cooling_rates[INDEX(15)] + y[INDEX(5)]*local_cooling_rates[INDEX(14)] + y[INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y[INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(3)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(7)] - y[INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(9)] - y[INDEX(7)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*(y[INDEX(3)] + y[INDEX(6)] + 4.0*y[INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y[INDEX(8)]*( T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y[INDEX(0)]*local_cooling_rates[INDEX(24)] + y[INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y[INDEX(0)]*y[INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y[INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
/*
dy[INDEX(9)] = -2.01588*y[INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y[INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y[INDEX(0)]*local_cooling_rates[INDEX(13)] + y[INDEX(2)]*local_cooling_rates[INDEX(12)] + y[INDEX(3)]*local_cooling_rates[INDEX(15)] + y[INDEX(5)]*local_cooling_rates[INDEX(14)] + y[INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y[INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(3)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(7)] - y[INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(9)] - y[INDEX(7)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*(y[INDEX(3)] + y[INDEX(6)] + 4.0*y[INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y[INDEX(8)]*(T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y[INDEX(0)]*local_cooling_rates[INDEX(24)] + y[INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y[INDEX(0)]*y[INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y[INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
*/
dy[INDEX(9)] *= inv_mdensity;
#ifdef SCALE_INPUT
// scaling the dydt vector back to code units
#pragma unroll
for (int i = 0; i< 10; i++){
dy[INDEX(i)] /= y_scale[S_INDEX(i)];
}
#endif
/*
if ( T_ID == 0 ){
*d_mem->rhs_call += 1;
printf("t = %0.5g; rhs_call = %d\n", t, *d_mem->rhs_call );
}
*/
/*
if ( T_ID == 0 ){
printf("time = %0.5g, at temp = %0.5g\n", t, T_local);
for (int i = 0; i< 10; i++){
printf("from tid[%d]: dy[%d] = %0.5g, y = %0.5g at t = %0.5g \n", T_ID, i, dy[INDEX(i)], y_in[INDEX(i)], t);
}
}
*/
// printf(" \n");
// }
}
| 1460b33db81554677d3ea97032f49e6dea6feddc.cu | #include "header.cuh"
#include "gpu_memory.cuh"
__device__ void interpolate_gamma( cvklu_data *rate_data, double T, double *gamma, double *dgamma_dT )
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
log_temp_out = log(T);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
*gamma = rate_data->g_gammaH2_1[bin_id] + Tdef * (rate_data->g_gammaH2_1[bin_id+1] - rate_data->g_gammaH2_1[bin_id]);
*dgamma_dT = rate_data->g_dgammaH2_1_dT[bin_id] + Tdef * (rate_data->g_dgammaH2_1_dT[bin_id+1] - rate_data->g_dgammaH2_1_dT[bin_id]);
}
__device__ void evaluate_temperature( double* T, double* dTs_ge, const double *y, const double mdensity, cvklu_data *rate_data )
{
// iterate temperature to convergence
double t, tnew, tdiff;
double dge, dge_dT;
double gammaH2, dgammaH2_dT, _gammaH2_m1;
int count = 0;
int MAX_ITERATION = 100;
double gamma = 5./3.;
double _gamma_m1 = 1.0 / (gamma - 1.0);
  double kb = 1.3806504e-16; // Boltzmann constant [erg/K]
// prepare t, tnew for the newton's iteration;
t = *T;
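  // t != t is true only for NaN; fall back to an initial guess of 1000 K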
if (t != t) t = 1000.0;
tnew = 1.1*t;
tdiff = tnew - t;
while ( tdiff/ tnew > 0.001 ){
// We do Newton's Iteration to calculate the temperature
// Since gammaH2 is dependent on the temperature too!
interpolate_gamma( rate_data, t, &gammaH2, &dgammaH2_dT );
_gammaH2_m1 = 1.0 / (gammaH2 - 1.0);
dge_dT = t*kb*(-y[INDEX(0)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT - y[INDEX(1)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT)/(mdensity)
+ kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1 + y[INDEX(4)]*_gamma_m1
+ y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity);
dge = t*kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1
+ y[INDEX(4)]*_gamma_m1 + y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity) - y[INDEX(9)];
//This is the change in ge for each iteration
tnew = t - dge/dge_dT;
count += 1;
tdiff = fabs(t - tnew);
t = tnew;
if (count > MAX_ITERATION){
printf("T[tid = %d] failed to converge (iteration: %d); at T = %0.3g \n", T_ID, count, tnew );
}
if ( t!= t && T_ID == 0){
printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", T_ID, t, count, y[INDEX(9)], gammaH2);
t = 1000.0;
for (int i = 0; i < 10; i++){
printf("y[INDEX(%d)] = %0.5g \n", i, y[INDEX(i)]);
}
break;
}
}
// update the temperature;
*T = t;
*dTs_ge = 1.0 / dge_dT;
// printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", tid, t, count, y[INDEX(9)], gammaH2);
}
__device__ void interpolate_reaction_rates( double *reaction_rates_out, double temp_out, cvklu_data *rate_data)
{
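  // Locate the log(T) bin in the tabulated rate data and linearly
  // interpolate each reaction rate between the two adjacent table entries.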
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, invTs, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
invTs = 1.0 / temp_out;
// rate_out is a long 1D array
  // NRATE is the number of rates required by the solver network
reaction_rates_out[INDEX( 0)] = rate_data->r_k01[bin_id] + Tdef * (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]);
reaction_rates_out[INDEX( 1)] = rate_data->r_k02[bin_id] + Tdef * (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]);
reaction_rates_out[INDEX( 2)] = rate_data->r_k03[bin_id] + Tdef * (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]);
reaction_rates_out[INDEX( 3)] = rate_data->r_k04[bin_id] + Tdef * (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]);
reaction_rates_out[INDEX( 4)] = rate_data->r_k05[bin_id] + Tdef * (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]);
reaction_rates_out[INDEX( 5)] = rate_data->r_k06[bin_id] + Tdef * (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]);
reaction_rates_out[INDEX( 6)] = rate_data->r_k07[bin_id] + Tdef * (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]);
reaction_rates_out[INDEX( 7)] = rate_data->r_k08[bin_id] + Tdef * (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]);
reaction_rates_out[INDEX( 8)] = rate_data->r_k09[bin_id] + Tdef * (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]);
reaction_rates_out[INDEX( 9)] = rate_data->r_k10[bin_id] + Tdef * (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]);
reaction_rates_out[INDEX(10)] = rate_data->r_k11[bin_id] + Tdef * (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]);
reaction_rates_out[INDEX(11)] = rate_data->r_k12[bin_id] + Tdef * (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]);
reaction_rates_out[INDEX(12)] = rate_data->r_k13[bin_id] + Tdef * (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]);
reaction_rates_out[INDEX(13)] = rate_data->r_k14[bin_id] + Tdef * (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]);
reaction_rates_out[INDEX(14)] = rate_data->r_k15[bin_id] + Tdef * (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]);
reaction_rates_out[INDEX(15)] = rate_data->r_k16[bin_id] + Tdef * (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]);
reaction_rates_out[INDEX(16)] = rate_data->r_k17[bin_id] + Tdef * (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]);
reaction_rates_out[INDEX(17)] = rate_data->r_k18[bin_id] + Tdef * (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]);
reaction_rates_out[INDEX(18)] = rate_data->r_k19[bin_id] + Tdef * (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]);
//reaction_rates_out[INDEX(19)] = rate_data->r_k20[bin_id] + Tdef * (rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id]);
reaction_rates_out[INDEX(20)] = rate_data->r_k21[bin_id] + Tdef * (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]);
reaction_rates_out[INDEX(21)] = rate_data->r_k22[bin_id] + Tdef * (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]);
//reaction_rates_out[INDEX(22)] = rate_data->r_k23[bin_id] + Tdef * (rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id]);
}
__device__ void interpolate_cooling_rates( double *cooling_rates_out, double temp_out, cvklu_data *rate_data)
{
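  // Same log(T) table lookup as interpolate_reaction_rates, applied here to
  // the cooling/heating coefficients.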
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
/*
if (T_ID == 0){
printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out);
}
*/
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// rate_out is a long 1D array
  // NRATE is the number of rates required by the solver network
cooling_rates_out[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id] + Tdef * (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]);
cooling_rates_out[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id] + Tdef * (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]);
cooling_rates_out[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id] + Tdef * (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]);
cooling_rates_out[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id] + Tdef * (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]);
cooling_rates_out[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id] + Tdef * (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]);
cooling_rates_out[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id] + Tdef * (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]);
cooling_rates_out[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id] + Tdef * (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]);
cooling_rates_out[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id] + Tdef * (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]);
cooling_rates_out[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id] + Tdef * (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]);
cooling_rates_out[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id] + Tdef * (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]);
cooling_rates_out[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id] + Tdef * (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]);
cooling_rates_out[INDEX(11)] = rate_data->c_brem_brem[bin_id] + Tdef * (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]);
cooling_rates_out[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]);
cooling_rates_out[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id] + Tdef * (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]);
cooling_rates_out[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]);
cooling_rates_out[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]);
cooling_rates_out[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id] + Tdef * (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]);
cooling_rates_out[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id] + Tdef * (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]);
cooling_rates_out[INDEX(18)] = rate_data->c_compton_comp_[bin_id] + Tdef * (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]);
cooling_rates_out[INDEX(19)] = rate_data->c_gammah_gammah[bin_id] + Tdef * (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]);
cooling_rates_out[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id] + Tdef * (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]);
cooling_rates_out[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id] + Tdef * (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]);
cooling_rates_out[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id] + Tdef * (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]);
cooling_rates_out[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id] + Tdef * (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]);
cooling_rates_out[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id] + Tdef * (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]);
cooling_rates_out[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
cooling_rates_out[INDEX(26)] = 1.0; //rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
}
__device__ void interpolate_dcrate_dT(double *dcr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// dT = t2 - t1;
// inv_Ts = temp_out;
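  // d(rate)/dT = (table[i+1] - table[i]) / dlogT * (dlogT/dT), with dlogT/dT = 1/T,
  // which gives the common factor 1 / ((t2 - t1) * temp_out) used below.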
double _dT_Ts = 1.0 / ( (t2 -t1)* temp_out );
//ceHI_ceHI: 0
dcr_dT[INDEX( 0)] = (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]) * _dT_Ts;
//ceHeI_ceHeI: 1
dcr_dT[INDEX( 1)] = (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]) * _dT_Ts;
//ceHeII_ceHeII: 2
dcr_dT[INDEX( 2)] = (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]) * _dT_Ts;
//ciHeIS_ciHeIS: 3
dcr_dT[INDEX( 3)] = (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]) * _dT_Ts;
//ciHI_ciHI: 4
dcr_dT[INDEX( 4)] = (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]) * _dT_Ts;
//ciHeI_ciHeI: 5
dcr_dT[INDEX( 5)] = (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]) * _dT_Ts;
//ciHeII_ciHeII: 6
dcr_dT[INDEX( 6)] = (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]) * _dT_Ts;
//reHII_reHII: 7
dcr_dT[INDEX( 7)] = (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]) * _dT_Ts;
//reHeII1_reHeII1: 8
dcr_dT[INDEX( 8)] = (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]) * _dT_Ts;
//reHeII2_reHeII2: 9
dcr_dT[INDEX( 9)] = (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]) * _dT_Ts;
//reHeIII_reHeIII: 10
dcr_dT[INDEX(10)] = (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]) * _dT_Ts;
//brem_brem: 11
dcr_dT[INDEX(11)] = (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]) * _dT_Ts;
//gloverabel08_gaHI: 12
dcr_dT[INDEX(12)] = (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]) * _dT_Ts;
//gloverabel08_gaH2: 13
dcr_dT[INDEX(13)] = (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]) * _dT_Ts;
//gloverabel08_gaHe: 14
dcr_dT[INDEX(14)] = (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]) * _dT_Ts;
//gloverabel08_gaHp: 15
dcr_dT[INDEX(15)] = (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]) * _dT_Ts;
//gloverabel08_gael: 16
dcr_dT[INDEX(16)] = (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]) * _dT_Ts;
//gloverabel08_h2lte: 17
dcr_dT[INDEX(17)] = (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]) * _dT_Ts;
//compton_comp_: 18
dcr_dT[INDEX(18)] = (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]) * _dT_Ts;
//gammah_gammah: 19
dcr_dT[INDEX(19)] = (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]) * _dT_Ts;
//h2formation_h2mheat: 20
dcr_dT[INDEX(20)] = (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]) * _dT_Ts;
//h2formation_h2mcool: 21
dcr_dT[INDEX(21)] = (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]) * _dT_Ts;
//h2formation_ncrn: 22
dcr_dT[INDEX(22)] = (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]) * _dT_Ts;
//h2formation_ncrd1: 23
dcr_dT[INDEX(23)] = (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]) * _dT_Ts;
//h2formation_ncrd2: 24
dcr_dT[INDEX(24)] = (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]) * _dT_Ts;
//cie_cooling_cieco: 25
dcr_dT[INDEX(25)] = (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]) * _dT_Ts;
//cie_optical_depth_approx: 26
dcr_dT[INDEX(26)] = 0.0;
}
__device__ void interpolate_drrate_dT(double *drr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
//dT = t2 - t1;
//inv_Ts = temp_out;
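  // Same finite-difference derivative in log(T) as in interpolate_dcrate_dT.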
double _dT_Ts = 1.0 / ((t2 - t1) * temp_out);
//k01: 0
drr_dT[INDEX( 0)] = (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]) *_dT_Ts;
//k02: 1
drr_dT[INDEX( 1)] = (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]) *_dT_Ts;
//k03: 2
drr_dT[INDEX( 2)] = (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]) *_dT_Ts;
//k04: 3
drr_dT[INDEX( 3)] = (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]) *_dT_Ts;
//k05: 4
drr_dT[INDEX( 4)] = (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]) *_dT_Ts;
//k06: 5
drr_dT[INDEX( 5)] = (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]) *_dT_Ts;
//k07: 6
drr_dT[INDEX( 6)] = (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]) *_dT_Ts;
//k08: 7
drr_dT[INDEX( 7)] = (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]) *_dT_Ts;
//k09: 8
drr_dT[INDEX( 8)] = (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]) *_dT_Ts;
//k10: 9
drr_dT[INDEX( 9)] = (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]) *_dT_Ts;
//k11: 10
drr_dT[INDEX(10)] = (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]) *_dT_Ts;
//k12: 11
drr_dT[INDEX(11)] = (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]) *_dT_Ts;
//k13: 12
drr_dT[INDEX(12)] = (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]) *_dT_Ts;
//k14: 13
drr_dT[INDEX(13)] = (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]) *_dT_Ts;
//k15: 14
drr_dT[INDEX(14)] = (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]) *_dT_Ts;
//k16: 15
drr_dT[INDEX(15)] = (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]) *_dT_Ts;
//k17: 16
drr_dT[INDEX(16)] = (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]) *_dT_Ts;
//k18: 17
drr_dT[INDEX(17)] = (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]) *_dT_Ts;
//k19: 18
drr_dT[INDEX(18)] = (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]) *_dT_Ts;
//k20: 19
// drr_dT[INDEX(19)] = (rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id]) *_dT_Ts;
//k21: 20
drr_dT[INDEX(20)] = (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]) *_dT_Ts;
//k22: 21
drr_dT[INDEX(21)] = (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]) *_dT_Ts;
//k23: 22
// drr_dT[INDEX(22)] = (rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id]) *_dT_Ts;
}
__device__ void dydt (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ dy, const mechanism_memory * d_mem) {
extern __shared__ double y_scale[];
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// int NSPECIES = 10;
const int NRATE = 23;
const int NCOOL = 26;
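  // Species layout assumed below: 0 H2_1, 1 H2_2, 2 H_1, 3 H_2, 4 H_m0,
  // 5 He_1, 6 He_2, 7 He_3, 8 de (electrons), 9 ge (gas energy)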
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
// scale related piece
double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
  // these should be retrieved from the d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
const double mdensity = d_mem->density[T_ID];
const double inv_mdensity = 1.0 / mdensity;
const double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
//const double * __restrict__ scale = d_mem->scale;
//const double * __restrict__ inv_scale = d_mem->inv_scale;
#pragma unroll
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)]*y_scale[S_INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g\n", i, y_in[INDEX(i)], i, scale[INDEX(i)] );
}
#else
#pragma unroll
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)];
}
#endif
evaluate_temperature ( &T_local, &Tge, y , mdensity, rate_data );
interpolate_reaction_rates( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
//# 0: H2_1
dy[INDEX(0)] = local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] - local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] + local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] + local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 1: H2_2
dy[INDEX(1)] = local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 2: H_1
dy[INDEX(2)] = -local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] - 2*local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] - 2*local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 3: H_2
dy[INDEX(3)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)];
//# 4: H_m0
dy[INDEX(4)] = local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] - local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 5: He_1
dy[INDEX(5)] = -local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] + local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)];
//# 6: He_2
dy[INDEX(6)] = local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 7: He_3
dy[INDEX(7)] = local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 8: de
dy[INDEX(8)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] + local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)];
//# 9: ge
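  // net heating/cooling of the gas energy; converted to specific energy by the
  // inv_mdensity factor applied just below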
dy[INDEX(9)] = -2.01588*y[INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y[INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y[INDEX(0)]*local_cooling_rates[INDEX(13)] + y[INDEX(2)]*local_cooling_rates[INDEX(12)] + y[INDEX(3)]*local_cooling_rates[INDEX(15)] + y[INDEX(5)]*local_cooling_rates[INDEX(14)] + y[INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y[INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(3)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(7)] - y[INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(9)] - y[INDEX(7)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*(y[INDEX(3)] + y[INDEX(6)] + 4.0*y[INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y[INDEX(8)]*( T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y[INDEX(0)]*local_cooling_rates[INDEX(24)] + y[INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y[INDEX(0)]*y[INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y[INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
/*
dy[INDEX(9)] = -2.01588*y[INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y[INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y[INDEX(0)]*local_cooling_rates[INDEX(13)] + y[INDEX(2)]*local_cooling_rates[INDEX(12)] + y[INDEX(3)]*local_cooling_rates[INDEX(15)] + y[INDEX(5)]*local_cooling_rates[INDEX(14)] + y[INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y[INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(3)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(7)] - y[INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(9)] - y[INDEX(7)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*(y[INDEX(3)] + y[INDEX(6)] + 4.0*y[INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y[INDEX(8)]*(T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y[INDEX(0)]*local_cooling_rates[INDEX(24)] + y[INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y[INDEX(0)]*y[INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y[INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
*/
dy[INDEX(9)] *= inv_mdensity;
#ifdef SCALE_INPUT
  // scaling the dydt vector back to code units
#pragma unroll
for (int i = 0; i< 10; i++){
dy[INDEX(i)] /= y_scale[S_INDEX(i)];
}
#endif
/*
if ( T_ID == 0 ){
*d_mem->rhs_call += 1;
printf("t = %0.5g; rhs_call = %d\n", t, *d_mem->rhs_call );
}
*/
/*
if ( T_ID == 0 ){
printf("time = %0.5g, at temp = %0.5g\n", t, T_local);
for (int i = 0; i< 10; i++){
printf("from tid[%d]: dy[%d] = %0.5g, y = %0.5g at t = %0.5g \n", T_ID, i, dy[INDEX(i)], y_in[INDEX(i)], t);
}
}
*/
// printf(" \n");
// }
}
|
dd49e10cc92c363a80fec87009d8865a5876fc89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
 * Perform a reduction from data of length 'size' to result, where the length of result will be 'number of blocks'.
*/
extern "C"
__global__ void cuSqrt(int n, float *a, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = sqrt(a[i]);
}
} | dd49e10cc92c363a80fec87009d8865a5876fc89.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
 * Perform a reduction from data of length 'size' to result, where the length of result will be 'number of blocks'.
*/
extern "C"
__global__ void cuSqrt(int n, float *a, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = sqrt(a[i]);
}
} |
154dd83062d09eb41f7baa43fa13b78b8a7eeb17.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <iostream>
#include <vector>
#include <algorithm>
#include <iterator>
#include <type_traits>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/type_lists.hpp>
//TODO remove after PR 3490 merge
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <cudf/cudf.h>
#include <cudf/reduction.hpp>
#include <thrust/device_vector.h>
#include <cudf/detail/aggregation/aggregation.hpp>
using aggregation = cudf::experimental::aggregation;
using cudf::experimental::scan_type;
using cudf::experimental::include_nulls;
using cudf::column_view;
void print_view(column_view const& view, const char* msg = nullptr) {
std::cout << msg<< " {";
cudf::test::print(view); std::cout << "}\n";
}
// This is the main test fixture
template <typename T>
struct ScanTest : public cudf::test::BaseFixture
{
void scan_test(
cudf::test::fixed_width_column_wrapper<T> const col_in,
cudf::test::fixed_width_column_wrapper<T> const expected_col_out,
std::unique_ptr<aggregation> const &agg, scan_type inclusive)
{
bool do_print = false;
auto int_values = cudf::test::to_host<T>(col_in);
auto exact_values = cudf::test::to_host<T>(expected_col_out);
this->val_check(std::get<0>(int_values), do_print, "input = ");
this->val_check(std::get<0>(exact_values), do_print, "exact = ");
const column_view input_view = col_in;
std::unique_ptr<cudf::column> col_out;
CUDF_EXPECT_NO_THROW( col_out = cudf::experimental::scan(input_view, agg, inclusive) );
const column_view result_view = col_out->view();
cudf::test::expect_column_properties_equal(input_view, result_view);
cudf::test::expect_columns_equal(expected_col_out, result_view);
auto host_result = cudf::test::to_host<T>(result_view);
this->val_check(std::get<0>(host_result), do_print, "result = ");
}
template <typename Ti>
void val_check(std::vector<Ti> const & v, bool do_print=false, const char* msg = nullptr){
if( do_print ){
std::cout << msg << " {";
std::for_each(v.begin(), v.end(), [](Ti i){ std::cout << ", " << i;});
std::cout << "}" << std::endl;
}
range_check(v);
}
  // make sure all elements are in the range of sint8 ([-128, 127])
template <typename Ti>
void range_check(std::vector<Ti> const & v){
std::for_each(v.begin(), v.end(),
[](Ti i){
ASSERT_GE(static_cast<int>(i), -128);
ASSERT_LT(static_cast<int>(i), 128);
});
}
};
using Types = cudf::test::NumericTypes;
//using Types = testing::Types<int32_t>;
TYPED_TEST_CASE(ScanTest, Types);
// ------------------------------------------------------------------------
TYPED_TEST(ScanTest, Min)
{
std::vector<TypeParam> v({123, 64, 63, 99, -5, 123, -16, -120, -111});
std::vector<bool> b({ 1, 0, 1, 1, 1, 1, 0, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=v[0]](auto i) mutable { acc = ::min(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = ::min(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, Max)
{
std::vector<TypeParam> v({-120, 5, 0, -120, -111, 64, 63, 99, 123, -16});
std::vector<bool> b({ 1, 0, 1, 1, 1, 1, 0, 1, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=v[0]](auto i) mutable { acc = ::max(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = ::max(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, Product)
{
std::vector<TypeParam> v({5, -1, 1, 3, -2, 4});
std::vector<bool> b({1, 1, 1, 0, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=1](auto i) mutable { acc *= i; return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_product_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=1](auto i, bool b) mutable { if(b) acc *= i; return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_product_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, Sum)
{
std::vector<TypeParam> v({-120, 5, 6, 113, -111, 64, -63, 9, 34, -16});
std::vector<bool> b({ 1, 0, 1, 1, 0, 0, 1, 1, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=0](auto i) mutable { acc += i; return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=0](auto i, bool b) mutable { if(b) acc += i; return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE);
}
struct ScanStringTest : public cudf::test::BaseFixture {
void scan_test(cudf::test::strings_column_wrapper const& col_in,
cudf::test::strings_column_wrapper const& expected_col_out,
std::unique_ptr<aggregation> const &agg, scan_type inclusive)
{
bool do_print = false;
if (do_print) {
std::cout << "input = {"; cudf::test::print(col_in); std::cout<<"}\n";
std::cout << "expect = {"; cudf::test::print(expected_col_out); std::cout<<"}\n";
}
const column_view input_view = col_in;
std::unique_ptr<cudf::column> col_out;
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(input_view, agg, inclusive));
const column_view result_view = col_out->view();
cudf::test::expect_column_properties_equal(input_view, result_view);
cudf::test::expect_columns_equal(expected_col_out, result_view);
if (do_print) {
std::cout << "result = {"; cudf::test::print(result_view); std::cout<<"}\n";
}
}
};
TEST_F(ScanStringTest, Min)
{
// data and valid arrays
std::vector<std::string> v({"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"});
std::vector<bool> b({ 1, 0, 1, 1, 0, 0, 1, 1, 1});
std::vector<std::string> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc = v[0]](auto i) mutable { acc = ::min(acc, i); return acc; });
// string column without nulls
cudf::test::strings_column_wrapper col_nonulls(v.begin(), v.end());
cudf::test::strings_column_wrapper expected1(exact.begin(), exact.end());
this->scan_test(col_nonulls, expected1,
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = ::min(acc, i); return acc; }
);
// string column with nulls
cudf::test::strings_column_wrapper col_nulls(v.begin(), v.end(), b.begin());
cudf::test::strings_column_wrapper expected2(exact.begin(), exact.end(), b.begin());
this->scan_test(col_nulls, expected2,
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
}
TEST_F(ScanStringTest, Max)
{
// data and valid arrays
std::vector<std::string> v({"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"});
std::vector<bool> b({ 1, 0, 1, 1, 0, 0, 1, 1, 1});
std::vector<std::string> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc = v[0]](auto i) mutable { acc = ::max(acc, i); return acc; });
// string column without nulls
cudf::test::strings_column_wrapper col_nonulls(v.begin(), v.end());
cudf::test::strings_column_wrapper expected1(exact.begin(), exact.end());
this->scan_test(col_nonulls, expected1, cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = ::max(acc, i); return acc; }
);
// string column with nulls
cudf::test::strings_column_wrapper col_nulls(v.begin(), v.end(), b.begin());
cudf::test::strings_column_wrapper expected2(exact.begin(), exact.end(), b.begin());
this->scan_test(col_nulls, expected2, cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, skip_nulls)
{
bool do_print=false;
std::vector<TypeParam> v{1,2,3,4,5,6,7,8,1,1};
std::vector<bool> b{1,1,1,1,1,0,1,0,1,1};
cudf::test::fixed_width_column_wrapper<TypeParam> const col_in{v.begin(), v.end(),
b.begin()};
const column_view input_view = col_in;
std::unique_ptr<cudf::column> col_out;
//test output calculation
std::vector<TypeParam> out_v(input_view.size());
std::vector<bool> out_b(input_view.size());
std::transform(v.cbegin(), v.cend(), b.cbegin(),
out_v.begin(),
[acc=0](auto i, bool b) mutable { if(b) (acc += i); return acc; }
);
std::transform(b.cbegin(), b.cend(),
out_b.begin(),
[acc=true](auto i) mutable { acc = acc && i; return acc; }
);
//skipna=true (default)
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(input_view,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::NO));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out1{
out_v.begin(), out_v.end(), b.cbegin()};
cudf::test::expect_column_properties_equal(expected_col_out1, col_out->view());
cudf::test::expect_columns_equal(expected_col_out1, col_out->view());
if(do_print) {
print_view(expected_col_out1, "expect = ");
print_view(col_out->view(), "result = ");
}
//skipna=false
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(input_view,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::YES));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out2{
out_v.begin(), out_v.end(), out_b.begin()};
if(do_print) {
print_view(expected_col_out2, "expect = ");
print_view(col_out->view(), "result = ");
}
cudf::test::expect_column_properties_equal(expected_col_out2, col_out->view());
cudf::test::expect_columns_equal(expected_col_out2, col_out->view());
}
TEST_F(ScanStringTest, skip_nulls)
{
bool do_print=false;
// data and valid arrays
std::vector<std::string> v({"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"});
std::vector<bool> b({ 1, 1, 1, 0, 0, 0, 1, 1, 1});
std::vector<std::string> exact(v.size());
std::vector<bool> out_b(v.size());
// test output calculation
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = ::max(acc, i); return acc; }
);
std::transform(b.cbegin(), b.cend(),
out_b.begin(),
[acc=true](auto i) mutable { acc = acc && i; return acc; }
);
// string column with nulls
cudf::test::strings_column_wrapper col_nulls(v.begin(), v.end(), b.begin());
cudf::test::strings_column_wrapper expected2(exact.begin(), exact.end(), out_b.begin());
std::unique_ptr<cudf::column> col_out;
//skipna=false
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(col_nulls,
cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE, include_nulls::YES));
if(do_print) {
print_view(expected2, "expect = ");
print_view(col_out->view(), "result = ");
}
cudf::test::expect_column_properties_equal(expected2, col_out->view());
cudf::test::expect_columns_equal(expected2, col_out->view());
//Exclusive scan string not supported.
CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::scan(col_nulls,
cudf::experimental::make_min_aggregation(), scan_type::EXCLUSIVE, include_nulls::NO)),
"String types supports only inclusive min/max for `cudf::scan`");
CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::scan(col_nulls,
cudf::experimental::make_min_aggregation(), scan_type::EXCLUSIVE, include_nulls::YES)),
"String types supports only inclusive min/max for `cudf::scan`");
}
TYPED_TEST(ScanTest, EmptyColumnskip_nulls)
{
bool do_print=false;
std::vector<TypeParam> v{};
std::vector<bool> b{};
cudf::test::fixed_width_column_wrapper<TypeParam> const col_in{v.begin(), v.end(),
b.begin()};
std::unique_ptr<cudf::column> col_out;
//test output calculation
std::vector<TypeParam> out_v(v.size());
std::vector<bool> out_b(v.size());
//skipna=true (default)
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(col_in,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::NO));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out1{
out_v.begin(), out_v.end(), b.cbegin()};
cudf::test::expect_column_properties_equal(expected_col_out1, col_out->view());
cudf::test::expect_columns_equal(expected_col_out1, col_out->view());
if(do_print) {
print_view(expected_col_out1, "expect = ");
print_view(col_out->view(), "result = ");
}
//skipna=false
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(col_in,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::YES));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out2{
out_v.begin(), out_v.end(), out_b.begin()};
if(do_print) {
print_view(expected_col_out2, "expect = ");
print_view(col_out->view(), "result = ");
}
cudf::test::expect_column_properties_equal(expected_col_out2, col_out->view());
cudf::test::expect_columns_equal(expected_col_out2, col_out->view());
} | 154dd83062d09eb41f7baa43fa13b78b8a7eeb17.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <iostream>
#include <vector>
#include <algorithm>
#include <iterator>
#include <type_traits>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/type_lists.hpp>
//TODO remove after PR 3490 merge
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <cudf/cudf.h>
#include <cudf/reduction.hpp>
#include <thrust/device_vector.h>
#include <cudf/detail/aggregation/aggregation.hpp>
using aggregation = cudf::experimental::aggregation;
using cudf::experimental::scan_type;
using cudf::experimental::include_nulls;
using cudf::column_view;
void print_view(column_view const& view, const char* msg = nullptr) {
std::cout << msg<< " {";
cudf::test::print(view); std::cout << "}\n";
}
// This is the main test fixture
template <typename T>
struct ScanTest : public cudf::test::BaseFixture
{
void scan_test(
cudf::test::fixed_width_column_wrapper<T> const col_in,
cudf::test::fixed_width_column_wrapper<T> const expected_col_out,
std::unique_ptr<aggregation> const &agg, scan_type inclusive)
{
bool do_print = false;
auto int_values = cudf::test::to_host<T>(col_in);
auto exact_values = cudf::test::to_host<T>(expected_col_out);
this->val_check(std::get<0>(int_values), do_print, "input = ");
this->val_check(std::get<0>(exact_values), do_print, "exact = ");
const column_view input_view = col_in;
std::unique_ptr<cudf::column> col_out;
CUDF_EXPECT_NO_THROW( col_out = cudf::experimental::scan(input_view, agg, inclusive) );
const column_view result_view = col_out->view();
cudf::test::expect_column_properties_equal(input_view, result_view);
cudf::test::expect_columns_equal(expected_col_out, result_view);
auto host_result = cudf::test::to_host<T>(result_view);
this->val_check(std::get<0>(host_result), do_print, "result = ");
}
template <typename Ti>
void val_check(std::vector<Ti> const & v, bool do_print=false, const char* msg = nullptr){
if( do_print ){
std::cout << msg << " {";
std::for_each(v.begin(), v.end(), [](Ti i){ std::cout << ", " << i;});
std::cout << "}" << std::endl;
}
range_check(v);
}
  // make sure all elements are in the range of sint8 ([-128, 127])
template <typename Ti>
void range_check(std::vector<Ti> const & v){
std::for_each(v.begin(), v.end(),
[](Ti i){
ASSERT_GE(static_cast<int>(i), -128);
ASSERT_LT(static_cast<int>(i), 128);
});
}
};
using Types = cudf::test::NumericTypes;
//using Types = testing::Types<int32_t>;
TYPED_TEST_CASE(ScanTest, Types);
// ------------------------------------------------------------------------
TYPED_TEST(ScanTest, Min)
{
std::vector<TypeParam> v({123, 64, 63, 99, -5, 123, -16, -120, -111});
std::vector<bool> b({ 1, 0, 1, 1, 1, 1, 0, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=v[0]](auto i) mutable { acc = std::min(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = std::min(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, Max)
{
std::vector<TypeParam> v({-120, 5, 0, -120, -111, 64, 63, 99, 123, -16});
std::vector<bool> b({ 1, 0, 1, 1, 1, 1, 0, 1, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=v[0]](auto i) mutable { acc = std::max(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = std::max(acc, i); return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, Product)
{
std::vector<TypeParam> v({5, -1, 1, 3, -2, 4});
std::vector<bool> b({1, 1, 1, 0, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=1](auto i) mutable { acc *= i; return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_product_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=1](auto i, bool b) mutable { if(b) acc *= i; return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_product_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, Sum)
{
std::vector<TypeParam> v({-120, 5, 6, 113, -111, 64, -63, 9, 34, -16});
std::vector<bool> b({ 1, 0, 1, 1, 0, 0, 1, 1, 1, 1});
std::vector<TypeParam> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc=0](auto i) mutable { acc += i; return acc; }
);
this->scan_test({v.begin(), v.end()},
{exact.begin(), exact.end()},
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=0](auto i, bool b) mutable { if(b) acc += i; return acc; }
);
this->scan_test({v.begin(), v.end(), b.begin()},
{exact.begin(), exact.end(), b.begin()},
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE);
}
struct ScanStringTest : public cudf::test::BaseFixture {
void scan_test(cudf::test::strings_column_wrapper const& col_in,
cudf::test::strings_column_wrapper const& expected_col_out,
std::unique_ptr<aggregation> const &agg, scan_type inclusive)
{
bool do_print = false;
if (do_print) {
std::cout << "input = {"; cudf::test::print(col_in); std::cout<<"}\n";
std::cout << "expect = {"; cudf::test::print(expected_col_out); std::cout<<"}\n";
}
const column_view input_view = col_in;
std::unique_ptr<cudf::column> col_out;
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(input_view, agg, inclusive));
const column_view result_view = col_out->view();
cudf::test::expect_column_properties_equal(input_view, result_view);
cudf::test::expect_columns_equal(expected_col_out, result_view);
if (do_print) {
std::cout << "result = {"; cudf::test::print(result_view); std::cout<<"}\n";
}
}
};
TEST_F(ScanStringTest, Min)
{
// data and valid arrays
std::vector<std::string> v({"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"});
std::vector<bool> b({ 1, 0, 1, 1, 0, 0, 1, 1, 1});
std::vector<std::string> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc = v[0]](auto i) mutable { acc = std::min(acc, i); return acc; });
// string column without nulls
cudf::test::strings_column_wrapper col_nonulls(v.begin(), v.end());
cudf::test::strings_column_wrapper expected1(exact.begin(), exact.end());
this->scan_test(col_nonulls, expected1,
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = std::min(acc, i); return acc; }
);
// string column with nulls
cudf::test::strings_column_wrapper col_nulls(v.begin(), v.end(), b.begin());
cudf::test::strings_column_wrapper expected2(exact.begin(), exact.end(), b.begin());
this->scan_test(col_nulls, expected2,
cudf::experimental::make_min_aggregation(), scan_type::INCLUSIVE);
}
TEST_F(ScanStringTest, Max)
{
// data and valid arrays
std::vector<std::string> v({"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"});
std::vector<bool> b({ 1, 0, 1, 1, 0, 0, 1, 1, 1});
std::vector<std::string> exact(v.size());
std::transform(v.cbegin(), v.cend(),
exact.begin(),
[acc = v[0]](auto i) mutable { acc = std::max(acc, i); return acc; });
// string column without nulls
cudf::test::strings_column_wrapper col_nonulls(v.begin(), v.end());
cudf::test::strings_column_wrapper expected1(exact.begin(), exact.end());
this->scan_test(col_nonulls, expected1, cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = std::max(acc, i); return acc; }
);
// string column with nulls
cudf::test::strings_column_wrapper col_nulls(v.begin(), v.end(), b.begin());
cudf::test::strings_column_wrapper expected2(exact.begin(), exact.end(), b.begin());
this->scan_test(col_nulls, expected2, cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE);
}
TYPED_TEST(ScanTest, skip_nulls)
{
bool do_print=false;
std::vector<TypeParam> v{1,2,3,4,5,6,7,8,1,1};
std::vector<bool> b{1,1,1,1,1,0,1,0,1,1};
cudf::test::fixed_width_column_wrapper<TypeParam> const col_in{v.begin(), v.end(),
b.begin()};
const column_view input_view = col_in;
std::unique_ptr<cudf::column> col_out;
//test output calculation
std::vector<TypeParam> out_v(input_view.size());
std::vector<bool> out_b(input_view.size());
std::transform(v.cbegin(), v.cend(), b.cbegin(),
out_v.begin(),
[acc=0](auto i, bool b) mutable { if(b) (acc += i); return acc; }
);
std::transform(b.cbegin(), b.cend(),
out_b.begin(),
[acc=true](auto i) mutable { acc = acc && i; return acc; }
);
//skipna=true (default)
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(input_view,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::NO));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out1{
out_v.begin(), out_v.end(), b.cbegin()};
cudf::test::expect_column_properties_equal(expected_col_out1, col_out->view());
cudf::test::expect_columns_equal(expected_col_out1, col_out->view());
if(do_print) {
print_view(expected_col_out1, "expect = ");
print_view(col_out->view(), "result = ");
}
//skipna=false
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(input_view,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::YES));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out2{
out_v.begin(), out_v.end(), out_b.begin()};
if(do_print) {
print_view(expected_col_out2, "expect = ");
print_view(col_out->view(), "result = ");
}
cudf::test::expect_column_properties_equal(expected_col_out2, col_out->view());
cudf::test::expect_columns_equal(expected_col_out2, col_out->view());
}
TEST_F(ScanStringTest, skip_nulls)
{
bool do_print=false;
// data and valid arrays
std::vector<std::string> v({"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"});
std::vector<bool> b({ 1, 1, 1, 0, 0, 0, 1, 1, 1});
std::vector<std::string> exact(v.size());
std::vector<bool> out_b(v.size());
// test output calculation
std::transform(v.cbegin(), v.cend(), b.begin(),
exact.begin(),
[acc=v[0]](auto i, bool b) mutable { if(b) acc = std::max(acc, i); return acc; }
);
std::transform(b.cbegin(), b.cend(),
out_b.begin(),
[acc=true](auto i) mutable { acc = acc && i; return acc; }
);
// string column with nulls
cudf::test::strings_column_wrapper col_nulls(v.begin(), v.end(), b.begin());
cudf::test::strings_column_wrapper expected2(exact.begin(), exact.end(), out_b.begin());
std::unique_ptr<cudf::column> col_out;
//skipna=false
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(col_nulls,
cudf::experimental::make_max_aggregation(), scan_type::INCLUSIVE, include_nulls::YES));
if(do_print) {
print_view(expected2, "expect = ");
print_view(col_out->view(), "result = ");
}
cudf::test::expect_column_properties_equal(expected2, col_out->view());
cudf::test::expect_columns_equal(expected2, col_out->view());
//Exclusive scan string not supported.
CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::scan(col_nulls,
cudf::experimental::make_min_aggregation(), scan_type::EXCLUSIVE, include_nulls::NO)),
"String types supports only inclusive min/max for `cudf::scan`");
CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::scan(col_nulls,
cudf::experimental::make_min_aggregation(), scan_type::EXCLUSIVE, include_nulls::YES)),
"String types supports only inclusive min/max for `cudf::scan`");
}
TYPED_TEST(ScanTest, EmptyColumnskip_nulls)
{
bool do_print=false;
std::vector<TypeParam> v{};
std::vector<bool> b{};
cudf::test::fixed_width_column_wrapper<TypeParam> const col_in{v.begin(), v.end(),
b.begin()};
std::unique_ptr<cudf::column> col_out;
//test output calculation
std::vector<TypeParam> out_v(v.size());
std::vector<bool> out_b(v.size());
//skipna=true (default)
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(col_in,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::NO));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out1{
out_v.begin(), out_v.end(), b.cbegin()};
cudf::test::expect_column_properties_equal(expected_col_out1, col_out->view());
cudf::test::expect_columns_equal(expected_col_out1, col_out->view());
if(do_print) {
print_view(expected_col_out1, "expect = ");
print_view(col_out->view(), "result = ");
}
//skipna=false
CUDF_EXPECT_NO_THROW(col_out = cudf::experimental::scan(col_in,
cudf::experimental::make_sum_aggregation(), scan_type::INCLUSIVE, include_nulls::YES));
cudf::test::fixed_width_column_wrapper<TypeParam> expected_col_out2{
out_v.begin(), out_v.end(), out_b.begin()};
if(do_print) {
print_view(expected_col_out2, "expect = ");
print_view(col_out->view(), "result = ");
}
cudf::test::expect_column_properties_equal(expected_col_out2, col_out->view());
cudf::test::expect_columns_equal(expected_col_out2, col_out->view());
} |