Dataset schema (string length ranges per column):

| Column | Type | Min length | Max length |
|---|---|---|---|
| hip_filename | string | 5 | 84 |
| hip_content | string | 79 | 9.69M |
| cuda_filename | string | 4 | 83 |
| cuda_content | string | 19 | 9.69M |
5d0fe5e83579ce33982cb69dbbfd40fbc103a637.hip
```cpp
// !!! This is a file automatically generated by hipify!!!
#include "weights.hpp"
#include <Attention/Attention.h>
#include <CUDA_ptr.hpp>
#include <Encoder/Encoder.h>
#include <Linear/Linear.h>
#include <Model.h>
#include <algorithm>
#include <array>
#include <iostream>
#include <random>
#include <string>
#include <vector>

using namespace culib;
using mat_t = tile_mat;
using attn_t = OTF_attn_full;
using attn_config = Attention_config<mat_t, mat_t, mat_t, mat_t, attn_t, true, false>;
using encoder_config = Encoder_config<attn_config, mat_t, mat_t, RELU_OP>;

CUDA_ptr<half> random_input(int nrow, int ncol);
std::vector<half> vec_F2H(const std::vector<float> &arr);

int main(int ac, char **av) {
    const auto weight_file = std::string(av[1]);
    const auto weights_path = std::string(av[2]);
    // Transformer
    constexpr auto TEXT_size = 1800 * 16;
    auto para = std::make_shared<Model_t>(Model_t{800, 800, 128, 4, 208, 800, 2});
    // Input
    auto IN = random_input(para->seq_len, para->emdim);
    // Output
    auto OUT1 = CUDA_ptr<half>(para->seq_len * para->emdim);
    auto OUT2 = CUDA_ptr<half>(para->seq_len * para->emdim);
    auto OUT = CUDA_ptr<half>(para->seq_len * TEXT_size);
    // Build model
    Weights weights(weight_file, weights_path);
    for (auto name : weights.layer_names) {
        std::cout << name << std::endl;
    }
    std::vector<std::unique_ptr<Encoder<encoder_config>>> model(para->nlayer);
    {
        // QKV
        const auto QKVname = "transformer_encoder.layers.0.self_attn.in_proj_.npz";
        tile_mat WQKV(weights.load_mat<int>("row_ptr", QKVname),
                      weights.load_mat<int>("column", QKVname),
                      vec_F2H(weights.load_mat<float>("data", QKVname)),
                      para->emdim / 16);
        culib::CUDA_ptr<half> BQKV(vec_F2H(weights.load_mat<float>("B", QKVname)));
        auto LQKV = std::make_unique<Linear<mat_t>>(para->emdim, para->kdim * 3,
                                                    WQKV, BQKV, para->seq_len);
        // O
        const auto Oname = "transformer_encoder.layers.0.self_attn.out_proj..npz";
        tile_mat WO(weights.load_mat<int>("row_ptr", Oname),
                    weights.load_mat<int>("column", Oname),
                    vec_F2H(weights.load_mat<float>("data", Oname)),
                    para->vdim / 16);
        culib::CUDA_ptr<half> BO(vec_F2H(weights.load_mat<float>("B", Oname)));
        auto LO = std::make_unique<Linear<mat_t>>(para->vdim, para->emdim,
                                                  WO, BO, para->seq_len);
        auto attn = std::make_unique<Attention<attn_config>>(std::move(LQKV), std::move(LO), para);
        attn->forward(OUT.get(), IN.get(), IN.get(), IN.get());
        // LN1
        const auto LN1name = "transformer_encoder.layers.0.norm1..npz";
        auto LN1 = std::make_unique<SkipLayerNorm>(
            vec_F2H(weights.load_mat<float>("W", LN1name)).data(),
            vec_F2H(weights.load_mat<float>("B", LN1name)).data(),
            para->emdim);
        // LN2
        const auto LN2name = "transformer_encoder.layers.0.norm2..npz";
        auto LN2 = std::make_unique<SkipLayerNorm>(
            vec_F2H(weights.load_mat<float>("W", LN2name)).data(),
            vec_F2H(weights.load_mat<float>("B", LN2name)).data(),
            para->emdim);
        // L1
        const auto L1name = "transformer_encoder.layers.0.linear1..npz";
        tile_mat WL1(weights.load_mat<int>("row_ptr", L1name),
                     weights.load_mat<int>("column", L1name),
                     vec_F2H(weights.load_mat<float>("data", L1name)),
                     para->emdim / 16);
        culib::CUDA_ptr<half> BL1(vec_F2H(weights.load_mat<float>("B", L1name)));
        auto L1 = std::make_unique<Linear<mat_t>>(para->emdim, para->dimFF,
                                                  WL1, BL1, para->seq_len);
        // L2
        const auto L2name = "transformer_encoder.layers.0.linear2..npz";
        tile_mat WL2(weights.load_mat<int>("row_ptr", L2name),
                     weights.load_mat<int>("column", L2name),
                     vec_F2H(weights.load_mat<float>("data", L2name)),
                     para->dimFF / 16);
        culib::CUDA_ptr<half> BL2(vec_F2H(weights.load_mat<float>("B", L2name)));
        auto L2 = std::make_unique<Linear<mat_t>>(para->dimFF, para->emdim,
                                                  WL2, BL2, para->seq_len);
        model[0] = std::make_unique<Encoder<encoder_config>>(
            std::move(attn), std::move(LN1), std::move(LN2),
            std::move(L1), std::move(L2), para);
    }
    {
        // QKV
        const auto QKVname = "transformer_encoder.layers.1.self_attn.in_proj_.npz";
        tile_mat WQKV(weights.load_mat<int>("row_ptr", QKVname),
                      weights.load_mat<int>("column", QKVname),
                      vec_F2H(weights.load_mat<float>("data", QKVname)),
                      para->emdim / 16);
        culib::CUDA_ptr<half> BQKV(vec_F2H(weights.load_mat<float>("B", QKVname)));
        auto LQKV = std::make_unique<Linear<mat_t>>(para->emdim, para->kdim * 3,
                                                    WQKV, BQKV, para->seq_len);
        // O
        const auto Oname = "transformer_encoder.layers.1.self_attn.out_proj..npz";
        tile_mat WO(weights.load_mat<int>("row_ptr", Oname),
                    weights.load_mat<int>("column", Oname),
                    vec_F2H(weights.load_mat<float>("data", Oname)),
                    para->vdim / 16);
        culib::CUDA_ptr<half> BO(vec_F2H(weights.load_mat<float>("B", Oname)));
        auto LO = std::make_unique<Linear<mat_t>>(para->vdim, para->emdim,
                                                  WO, BO, para->seq_len);
        auto attn = std::make_unique<Attention<attn_config>>(std::move(LQKV), std::move(LO), para);
        attn->forward(OUT.get(), IN.get(), IN.get(), IN.get());
        // LN1
        const auto LN1name = "transformer_encoder.layers.1.norm1..npz";
        auto LN1 = std::make_unique<SkipLayerNorm>(
            vec_F2H(weights.load_mat<float>("W", LN1name)).data(),
            vec_F2H(weights.load_mat<float>("B", LN1name)).data(),
            para->emdim);
        // LN2
        const auto LN2name = "transformer_encoder.layers.1.norm2..npz";
        auto LN2 = std::make_unique<SkipLayerNorm>(
            vec_F2H(weights.load_mat<float>("W", LN2name)).data(),
            vec_F2H(weights.load_mat<float>("B", LN2name)).data(),
            para->emdim);
        // L1
        const auto L1name = "transformer_encoder.layers.1.linear1..npz";
        tile_mat WL1(weights.load_mat<int>("row_ptr", L1name),
                     weights.load_mat<int>("column", L1name),
                     vec_F2H(weights.load_mat<float>("data", L1name)),
                     para->emdim / 16);
        culib::CUDA_ptr<half> BL1(vec_F2H(weights.load_mat<float>("B", L1name)));
        auto L1 = std::make_unique<Linear<mat_t>>(para->emdim, para->dimFF,
                                                  WL1, BL1, para->seq_len);
        // L2
        const auto L2name = "transformer_encoder.layers.1.linear2..npz";
        tile_mat WL2(weights.load_mat<int>("row_ptr", L2name),
                     weights.load_mat<int>("column", L2name),
                     vec_F2H(weights.load_mat<float>("data", L2name)),
                     para->dimFF / 16);
        culib::CUDA_ptr<half> BL2(vec_F2H(weights.load_mat<float>("B", L2name)));
        auto L2 = std::make_unique<Linear<mat_t>>(para->dimFF, para->emdim,
                                                  WL2, BL2, para->seq_len);
        model[1] = std::make_unique<Encoder<encoder_config>>(
            std::move(attn), std::move(LN1), std::move(LN2),
            std::move(L1), std::move(L2), para);
    }
    // Decoder
    const auto Decodername = "decoder..npz";
    tile_mat WD(weights.load_mat<int>("row_ptr", Decodername),
                weights.load_mat<int>("column", Decodername),
                vec_F2H(weights.load_mat<float>("data", Decodername)),
                para->emdim / 16);
    culib::CUDA_ptr<half> BD(vec_F2H(weights.load_mat<float>("B", Decodername)));
    auto LD = std::make_unique<Linear<mat_t>>(para->emdim, TEXT_size, WD, BD, para->seq_len);
    // Inference
    model[0]->forward(OUT1.get(), IN.get());
    model[1]->forward(OUT2.get(), OUT1.get());
    LD->forward(OUT.get(), OUT2.get());
    hipDeviceSynchronize();
    std::vector<half> h_out(OUT.size);
    OUT.dump(h_out.data());
    std::cout << __half2float(h_out[0]) << std::endl;
}

CUDA_ptr<half> random_input(int nrow, int ncol) {
    const auto size = nrow * ncol;
    // Reserve and append: constructing the vector with `size` elements and
    // then appending through a back_inserter would leave `size` leading zeros
    // and double the buffer length.
    std::vector<half> h_data;
    h_data.reserve(size);
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<float> dis(-1, 1);
    std::generate_n(std::back_inserter(h_data), size,
                    [&]() { return __float2half_rn(dis(gen)); });
    return CUDA_ptr<half>(h_data);
}

std::vector<half> vec_F2H(const std::vector<float> &arr) {
    std::vector<half> res(arr.size());
    std::transform(arr.begin(), arr.end(), res.begin(),
                   [](float f) { return __float2half_rn(f); });
    return res;
}
```
5d0fe5e83579ce33982cb69dbbfd40fbc103a637.cu
#include "weights.hpp" #include <Attention/Attention.h> #include <CUDA_ptr.hpp> #include <Encoder/Encoder.h> #include <Linear/Linear.h> #include <Model.h> #include <algorithm> #include <array> #include <iostream> #include <random> #include <string> #include <vector> using namespace culib; using mat_t = tile_mat; using attn_t = OTF_attn_full; using attn_config = Attention_config<mat_t, mat_t, mat_t, mat_t, attn_t, true, false>; using encoder_config = Encoder_config<attn_config, mat_t, mat_t, RELU_OP>; CUDA_ptr<half> random_input(int nrow, int ncol); std::vector<half> vec_F2H(const std::vector<float> &arr); int main(int ac, char **av) { const auto weight_file = std::string(av[1]); const auto weights_path = std::string(av[2]); // Transformer constexpr auto TEXT_size = 1800 * 16; auto para = std::make_shared<Model_t>(Model_t{800, 800, 128, 4, 208, 800, 2}); // Input auto IN = random_input(para->seq_len, para->emdim); // Output auto OUT1 = CUDA_ptr<half>(para->seq_len * para->emdim); auto OUT2 = CUDA_ptr<half>(para->seq_len * para->emdim); auto OUT = CUDA_ptr<half>(para->seq_len * TEXT_size); // Build model Weights weights(weight_file, weights_path); for (auto name : weights.layer_names) { std::cout << name << std::endl; } std::vector<std::unique_ptr<Encoder<encoder_config>>> model(para->nlayer); { // QKV const auto QKVname = "transformer_encoder.layers.0.self_attn.in_proj_.npz"; tile_mat WQKV(weights.load_mat<int>("row_ptr", QKVname), weights.load_mat<int>("column", QKVname), vec_F2H(weights.load_mat<float>("data", QKVname)), para->emdim / 16); culib::CUDA_ptr<half> BQKV( vec_F2H(weights.load_mat<float>("B", QKVname))); auto LQKV = std::make_unique<Linear<mat_t>>(para->emdim, para->kdim * 3, WQKV, BQKV, para->seq_len); // O const auto Oname = "transformer_encoder.layers.0.self_attn.out_proj..npz"; tile_mat WO(weights.load_mat<int>("row_ptr", Oname), weights.load_mat<int>("column", Oname), vec_F2H(weights.load_mat<float>("data", Oname)), para->vdim / 16); culib::CUDA_ptr<half> BO(vec_F2H(weights.load_mat<float>("B", Oname))); auto LO = std::make_unique<Linear<mat_t>>(para->vdim, para->emdim, WO, BO, para->seq_len); auto attn = std::make_unique<Attention<attn_config>>( std::move(LQKV), std::move(LO), para); attn->forward(OUT.get(), IN.get(), IN.get(), IN.get()); // LN1 const auto LN1name = "transformer_encoder.layers.0.norm1..npz"; auto LN1 = std::make_unique<SkipLayerNorm>( vec_F2H(weights.load_mat<float>("W", LN1name)).data(), vec_F2H(weights.load_mat<float>("B", LN1name)).data(), para->emdim); // LN2 const auto LN2name = "transformer_encoder.layers.0.norm2..npz"; auto LN2 = std::make_unique<SkipLayerNorm>( vec_F2H(weights.load_mat<float>("W", LN2name)).data(), vec_F2H(weights.load_mat<float>("B", LN2name)).data(), para->emdim); // L1 const auto L1name = "transformer_encoder.layers.0.linear1..npz"; tile_mat WL1(weights.load_mat<int>("row_ptr", L1name), weights.load_mat<int>("column", L1name), vec_F2H(weights.load_mat<float>("data", L1name)), para->emdim / 16); culib::CUDA_ptr<half> BL1( vec_F2H(weights.load_mat<float>("B", L1name))); auto L1 = std::make_unique<Linear<mat_t>>(para->emdim, para->dimFF, WL1, BL1, para->seq_len); // L2 const auto L2name = "transformer_encoder.layers.0.linear2..npz"; tile_mat WL2(weights.load_mat<int>("row_ptr", L2name), weights.load_mat<int>("column", L2name), vec_F2H(weights.load_mat<float>("data", L2name)), para->dimFF / 16); culib::CUDA_ptr<half> BL2( vec_F2H(weights.load_mat<float>("B", L2name))); auto L2 = std::make_unique<Linear<mat_t>>(para->dimFF, 
para->emdim, WL2, BL2, para->seq_len); model[0] = std::make_unique<Encoder<encoder_config>>( std::move(attn), std::move(LN1), std::move(LN2), std::move(L1), std::move(L2), para); } { // QKV const auto QKVname = "transformer_encoder.layers.1.self_attn.in_proj_.npz"; tile_mat WQKV(weights.load_mat<int>("row_ptr", QKVname), weights.load_mat<int>("column", QKVname), vec_F2H(weights.load_mat<float>("data", QKVname)), para->emdim / 16); culib::CUDA_ptr<half> BQKV( vec_F2H(weights.load_mat<float>("B", QKVname))); auto LQKV = std::make_unique<Linear<mat_t>>(para->emdim, para->kdim * 3, WQKV, BQKV, para->seq_len); // O const auto Oname = "transformer_encoder.layers.1.self_attn.out_proj..npz"; tile_mat WO(weights.load_mat<int>("row_ptr", Oname), weights.load_mat<int>("column", Oname), vec_F2H(weights.load_mat<float>("data", Oname)), para->vdim / 16); culib::CUDA_ptr<half> BO(vec_F2H(weights.load_mat<float>("B", Oname))); auto LO = std::make_unique<Linear<mat_t>>(para->vdim, para->emdim, WO, BO, para->seq_len); auto attn = std::make_unique<Attention<attn_config>>( std::move(LQKV), std::move(LO), para); attn->forward(OUT.get(), IN.get(), IN.get(), IN.get()); // LN1 const auto LN1name = "transformer_encoder.layers.1.norm1..npz"; auto LN1 = std::make_unique<SkipLayerNorm>( vec_F2H(weights.load_mat<float>("W", LN1name)).data(), vec_F2H(weights.load_mat<float>("B", LN1name)).data(), para->emdim); // LN2 const auto LN2name = "transformer_encoder.layers.1.norm2..npz"; auto LN2 = std::make_unique<SkipLayerNorm>( vec_F2H(weights.load_mat<float>("W", LN2name)).data(), vec_F2H(weights.load_mat<float>("B", LN2name)).data(), para->emdim); // L1 const auto L1name = "transformer_encoder.layers.1.linear1..npz"; tile_mat WL1(weights.load_mat<int>("row_ptr", L1name), weights.load_mat<int>("column", L1name), vec_F2H(weights.load_mat<float>("data", L1name)), para->emdim / 16); culib::CUDA_ptr<half> BL1( vec_F2H(weights.load_mat<float>("B", L1name))); auto L1 = std::make_unique<Linear<mat_t>>(para->emdim, para->dimFF, WL1, BL1, para->seq_len); // L2 const auto L2name = "transformer_encoder.layers.1.linear2..npz"; tile_mat WL2(weights.load_mat<int>("row_ptr", L2name), weights.load_mat<int>("column", L2name), vec_F2H(weights.load_mat<float>("data", L2name)), para->dimFF / 16); culib::CUDA_ptr<half> BL2( vec_F2H(weights.load_mat<float>("B", L2name))); auto L2 = std::make_unique<Linear<mat_t>>(para->dimFF, para->emdim, WL2, BL2, para->seq_len); model[1] = std::make_unique<Encoder<encoder_config>>( std::move(attn), std::move(LN1), std::move(LN2), std::move(L1), std::move(L2), para); } // Decoder const auto Decodername = "decoder..npz"; tile_mat WD(weights.load_mat<int>("row_ptr", Decodername), weights.load_mat<int>("column", Decodername), vec_F2H(weights.load_mat<float>("data", Decodername)), para->emdim / 16); culib::CUDA_ptr<half> BD( vec_F2H(weights.load_mat<float>("B", Decodername))); auto LD = std::make_unique<Linear<mat_t>>(para->emdim, TEXT_size, WD, BD, para->seq_len); // Inference model[0]->forward(OUT1.get(), IN.get()); model[1]->forward(OUT2.get(), OUT1.get()); LD->forward(OUT.get(), OUT2.get()); cudaDeviceSynchronize(); std::vector<half> h_out(OUT.size); OUT.dump(h_out.data()); std::cout << __half2float(h_out[0]) << std::endl; } CUDA_ptr<half> random_input(int nrow, int ncol) { const auto size = nrow * ncol; std::vector<half> h_data(size); std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<float> dis(-1, 1); std::generate_n(std::back_inserter(h_data), size, [&]() { return 
__float2half_rn(dis(gen)); }); return CUDA_ptr<half>(h_data); } std::vector<half> vec_F2H(const std::vector<float> &arr) { std::vector<half> res(arr.size()); std::transform(arr.begin(), arr.end(), res.begin(), [](float f) { return __float2half_rn(f); }); return res; }
ab96c5cfc1bb1538cacf62bd581e857194007877.hip
```cpp
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define HISTOGRAM_LENGTH 256

__global__ void histo_kernel(unsigned char *buffer, unsigned int *histo, long size)
{
    // compute histogram with a private version in each block
    __shared__ unsigned int histo_private[HISTOGRAM_LENGTH];
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // index of current pixel
    int index = tx + bx * blockDim.x;
    // set initial values of histogram to zero
    if (tx < HISTOGRAM_LENGTH)
        histo_private[tx] = 0;
    __syncthreads();
    int stride = blockDim.x * gridDim.x;
    // grid-stride loop over all pixels; the loop bound is the input
    // length `size`, not the stride
    while (index < size) {
        atomicAdd(&(histo_private[buffer[index]]), 1);
        index += stride;
    }
    __syncthreads();
    // copy private histogram to device histogram
    if (tx < HISTOGRAM_LENGTH) {
        atomicAdd(&(histo[tx]), histo_private[tx]);
    }
}
```
ab96c5cfc1bb1538cacf62bd581e857194007877.cu
#include "includes.h" #define HISTOGRAM_LENGTH 256 __global__ void histo_kernel(unsigned char * buffer, unsigned int * histo, long size) { // compute histogram with a private version in each block __shared__ unsigned int histo_private[HISTOGRAM_LENGTH]; int bx = blockIdx.x; int tx = threadIdx.x; // index of current pixel int index = tx+bx*blockDim.x; // set initial values of histogram to zero if (tx < HISTOGRAM_LENGTH) histo_private[tx] = 0; __syncthreads(); int stride = blockDim.x*gridDim.x; //iterate to add values while (index < stride) { atomicAdd(&(histo_private[buffer[index]]), 1); index += stride; } __syncthreads(); //copy private histogram to device histogram if(tx<256) { atomicAdd(&(histo[tx]), histo_private[tx]); } }
d484af71bed2bf59c3390a8fa7e87279a12efe69.hip
```cpp
// !!! This is a file automatically generated by hipify!!!
/**
 * bicg.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <[email protected]>
 * Louis-Noel Pouchet <[email protected]>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>

#include "../../common/polybenchUtilFuncts.h"

// Error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5

#define GPU_DEVICE 0

/* Problem size. */
#define NX 128
#define NY 128

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1

#define STR_SIZE 256

#ifndef M_PI
#define M_PI 3.14159
#endif

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r)
{
    int i, j;
    for (i = 0; i < NX; i++) {
        r[i] = i * M_PI;
        for (j = 0; j < NY; j++) {
            A[i*NY + j] = ((DATA_TYPE) i*j) / NX;
        }
    }
    for (i = 0; i < NY; i++) {
        p[i] = i * M_PI;
    }
}

void print(DATA_TYPE* q_outputFromGpu, DATA_TYPE* s_outputFromGpu)
{
    FILE* fp;
    fp = fopen("out.txt", "w");
    char str[STR_SIZE];
    if (!fp) {
        printf("Error writing!");
        return;
    }
    //sprintf(str,"%d",NI);
    //fputs(str,fp);
    int i, j;
    for (i = 0; i < NX; ++i) {
        sprintf(str, "%f\n", q_outputFromGpu[i]);
        fputs(str, fp);
    }
    for (j = 0; j < NY; ++j) {
        sprintf(str, "%f\n", s_outputFromGpu[j]);
        fputs(str, fp);
    }
    fclose(fp);
}

void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu)
{
    int i, fail;
    fail = 0;
    // Compare s with s_cuda
    for (i = 0; i < NX; i++) {
        if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
            fail++;
        }
    }
    for (i = 0; i < NY; i++) {
        if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
            fail++;
        }
    }
    // print results
    printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n",
           PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

void GPU_argv_init()
{
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
    printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
    hipSetDevice(GPU_DEVICE);
}

// Distributed (split) from initial loop and permuted into reverse order to allow parallelism...
__global__ void bicg_kernel1(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < NY) {
        s[j] = 0.0f;
        int i;
        for (i = 0; i < NX; i++) {
            s[j] += A[i * NY + j] * r[i];
        }
    }
}

// Distributed (split) from initial loop to allow parallelism
__global__ void bicg_kernel2(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < NX) {
        q[i] = 0.0f;
        int j;
        for (j = 0; j < NY; j++) {
            q[i] += A[i * NY + j] * p[j];
        }
    }
}

void bicg_cpu(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
    int i, j;
    for (i = 0; i < NY; i++) {
        s[i] = 0.0;
    }
    for (i = 0; i < NX; i++) {
        q[i] = 0.0;
        for (j = 0; j < NY; j++) {
            s[j] = s[j] + r[i] * A[i*NY + j];
            q[i] = q[i] + A[i*NY + j] * p[j];
        }
    }
}

void bicgCuda(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q,
              DATA_TYPE* s_outputFromGpu, DATA_TYPE* q_outputFromGpu)
{
    double t_start, t_end;

    DATA_TYPE *A_gpu;
    DATA_TYPE *q_gpu;
    DATA_TYPE *p_gpu;
    DATA_TYPE *r_gpu;
    DATA_TYPE *s_gpu;

    hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
    hipMalloc((void **)&r_gpu, sizeof(DATA_TYPE) * NX);
    hipMalloc((void **)&s_gpu, sizeof(DATA_TYPE) * NY);
    hipMalloc((void **)&p_gpu, sizeof(DATA_TYPE) * NY);
    hipMalloc((void **)&q_gpu, sizeof(DATA_TYPE) * NX);
    hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
    hipMemcpy(r_gpu, r, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
    hipMemcpy(s_gpu, s, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
    hipMemcpy(p_gpu, p, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
    hipMemcpy(q_gpu, q, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);

    dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
    dim3 grid1((size_t)(ceil(((float)NY) / ((float)block.x))), 1);
    dim3 grid2((size_t)(ceil(((float)NX) / ((float)block.x))), 1);

    t_start = rtclock();
    hipLaunchKernelGGL(bicg_kernel1, dim3(grid1), dim3(block), 0, 0, A_gpu, r_gpu, s_gpu);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(bicg_kernel2, dim3(grid2), dim3(block), 0, 0, A_gpu, p_gpu, q_gpu);
    hipDeviceSynchronize();
    t_end = rtclock();
    fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

    hipMemcpy(s_outputFromGpu, s_gpu, sizeof(DATA_TYPE) * NY, hipMemcpyDeviceToHost);
    hipMemcpy(q_outputFromGpu, q_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost);

    hipFree(A_gpu);
    hipFree(r_gpu);
    hipFree(s_gpu);
    hipFree(p_gpu);
    hipFree(q_gpu);
}

int main(int argc, char** argv)
{
    // double t_start, t_end;

    DATA_TYPE* A;
    DATA_TYPE* r;
    DATA_TYPE* s;
    DATA_TYPE* p;
    DATA_TYPE* q;
    DATA_TYPE* s_outputFromGpu;
    DATA_TYPE* q_outputFromGpu;

    A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
    r = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
    s = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    p = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    q = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
    s_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    q_outputFromGpu = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));

    init_array(A, p, r);
    GPU_argv_init();
    bicgCuda(A, r, s, p, q, s_outputFromGpu, q_outputFromGpu);
    print(q_outputFromGpu, s_outputFromGpu);

    // t_start = rtclock();
    // bicg_cpu(A, r, s, p, q);
    // t_end = rtclock();
    // fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
    // compareResults(s, s_outputFromGpu, q, q_outputFromGpu);

    free(A);
    free(r);
    free(s);
    free(p);
    free(q);
    free(s_outputFromGpu);
    free(q_outputFromGpu);

    return 0;
}
```
d484af71bed2bf59c3390a8fa7e87279a12efe69.cu
```cpp
/**
 * bicg.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <[email protected]>
 * Louis-Noel Pouchet <[email protected]>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>

#include "../../common/polybenchUtilFuncts.h"

// Error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5

#define GPU_DEVICE 0

/* Problem size. */
#define NX 128
#define NY 128

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1

#define STR_SIZE 256

#ifndef M_PI
#define M_PI 3.14159
#endif

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r)
{
    int i, j;
    for (i = 0; i < NX; i++) {
        r[i] = i * M_PI;
        for (j = 0; j < NY; j++) {
            A[i*NY + j] = ((DATA_TYPE) i*j) / NX;
        }
    }
    for (i = 0; i < NY; i++) {
        p[i] = i * M_PI;
    }
}

void print(DATA_TYPE* q_outputFromGpu, DATA_TYPE* s_outputFromGpu)
{
    FILE* fp;
    fp = fopen("out.txt", "w");
    char str[STR_SIZE];
    if (!fp) {
        printf("Error writing!");
        return;
    }
    //sprintf(str,"%d",NI);
    //fputs(str,fp);
    int i, j;
    for (i = 0; i < NX; ++i) {
        sprintf(str, "%f\n", q_outputFromGpu[i]);
        fputs(str, fp);
    }
    for (j = 0; j < NY; ++j) {
        sprintf(str, "%f\n", s_outputFromGpu[j]);
        fputs(str, fp);
    }
    fclose(fp);
}

void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu)
{
    int i, fail;
    fail = 0;
    // Compare s with s_cuda
    for (i = 0; i < NX; i++) {
        if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
            fail++;
        }
    }
    for (i = 0; i < NY; i++) {
        if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
            fail++;
        }
    }
    // print results
    printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n",
           PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

void GPU_argv_init()
{
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
    printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
    cudaSetDevice(GPU_DEVICE);
}

// Distributed (split) from initial loop and permuted into reverse order to allow parallelism...
__global__ void bicg_kernel1(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < NY) {
        s[j] = 0.0f;
        int i;
        for (i = 0; i < NX; i++) {
            s[j] += A[i * NY + j] * r[i];
        }
    }
}

// Distributed (split) from initial loop to allow parallelism
__global__ void bicg_kernel2(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < NX) {
        q[i] = 0.0f;
        int j;
        for (j = 0; j < NY; j++) {
            q[i] += A[i * NY + j] * p[j];
        }
    }
}

void bicg_cpu(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
    int i, j;
    for (i = 0; i < NY; i++) {
        s[i] = 0.0;
    }
    for (i = 0; i < NX; i++) {
        q[i] = 0.0;
        for (j = 0; j < NY; j++) {
            s[j] = s[j] + r[i] * A[i*NY + j];
            q[i] = q[i] + A[i*NY + j] * p[j];
        }
    }
}

void bicgCuda(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q,
              DATA_TYPE* s_outputFromGpu, DATA_TYPE* q_outputFromGpu)
{
    double t_start, t_end;

    DATA_TYPE *A_gpu;
    DATA_TYPE *q_gpu;
    DATA_TYPE *p_gpu;
    DATA_TYPE *r_gpu;
    DATA_TYPE *s_gpu;

    cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
    cudaMalloc((void **)&r_gpu, sizeof(DATA_TYPE) * NX);
    cudaMalloc((void **)&s_gpu, sizeof(DATA_TYPE) * NY);
    cudaMalloc((void **)&p_gpu, sizeof(DATA_TYPE) * NY);
    cudaMalloc((void **)&q_gpu, sizeof(DATA_TYPE) * NX);
    cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
    cudaMemcpy(r_gpu, r, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
    cudaMemcpy(s_gpu, s, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
    cudaMemcpy(p_gpu, p, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
    cudaMemcpy(q_gpu, q, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);

    dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
    dim3 grid1((size_t)(ceil(((float)NY) / ((float)block.x))), 1);
    dim3 grid2((size_t)(ceil(((float)NX) / ((float)block.x))), 1);

    t_start = rtclock();
    bicg_kernel1<<< grid1, block >>>(A_gpu, r_gpu, s_gpu);
    cudaDeviceSynchronize();  // wait for the kernel before timing and reuse
    bicg_kernel2<<< grid2, block >>>(A_gpu, p_gpu, q_gpu);
    cudaDeviceSynchronize();
    t_end = rtclock();
    fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

    cudaMemcpy(s_outputFromGpu, s_gpu, sizeof(DATA_TYPE) * NY, cudaMemcpyDeviceToHost);
    cudaMemcpy(q_outputFromGpu, q_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);

    cudaFree(A_gpu);
    cudaFree(r_gpu);
    cudaFree(s_gpu);
    cudaFree(p_gpu);
    cudaFree(q_gpu);
}

int main(int argc, char** argv)
{
    // double t_start, t_end;

    DATA_TYPE* A;
    DATA_TYPE* r;
    DATA_TYPE* s;
    DATA_TYPE* p;
    DATA_TYPE* q;
    DATA_TYPE* s_outputFromGpu;
    DATA_TYPE* q_outputFromGpu;

    A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
    r = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
    s = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    p = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    q = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
    s_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    q_outputFromGpu = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));

    init_array(A, p, r);
    GPU_argv_init();
    bicgCuda(A, r, s, p, q, s_outputFromGpu, q_outputFromGpu);
    print(q_outputFromGpu, s_outputFromGpu);

    // t_start = rtclock();
    // bicg_cpu(A, r, s, p, q);
    // t_end = rtclock();
    // fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
    // compareResults(s, s_outputFromGpu, q, q_outputFromGpu);

    free(A);
    free(r);
    free(s);
    free(p);
    free(q);
    free(s_outputFromGpu);
    free(q_outputFromGpu);

    return 0;
}
```
20c7a41340a24d7ab4f28a4c761a5d9e7e81d94f.hip
```cpp
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 ============================================================================
 Name        : structoverflow.cu
 Author      :
 Version     :
 Copyright   : Your copyright notice
 Description : CUDA compute reciprocals
 ============================================================================
 */

#include <stdio.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>

using namespace std;

#define BUF_LEN 6

static void CheckCudaErrorAux(const char *, unsigned, const char *, hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)

__device__ __noinline__ void normal() {
    printf("normal!\n");
}

__device__ __noinline__ void secret() {
    printf("Hello Admin!\n");
}

struct unsafe {
    unsigned long buf[BUF_LEN];
    void (*normal)();
};

__device__ __noinline__ void init(struct unsafe *data) {
    data->normal = normal;
}

__global__ void test_kernel(unsigned long *input, int len, int admin) {
    struct unsafe cu;
    init(&cu);
    for (int i = 0; i < len; i++)
        cu.buf[i] = input[i];
    cu.normal();
    secret();
    printf("%p", secret);
}

int main(void) {
    unsigned long input[10];
    unsigned long *dev_input;
    int len = 6;
    int admin = 0;
    for (int i = 0; i < 10; i++) {
        input[i] = 0xb2140; // this is secret() address
    }
    CUDA_CHECK_RETURN(hipMalloc((void**)&dev_input, 10*sizeof(unsigned long)));
    CUDA_CHECK_RETURN(hipMemcpy(dev_input, input, 10*sizeof(unsigned long), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(test_kernel, dim3(1), dim3(1), 0, 0, dev_input, len, admin);
    hipFree(dev_input);
    return 0;
}

/**
 * Check the return value of the CUDA runtime API call and exit
 * the application if the call has failed.
 */
static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, hipError_t err) {
    if (err == hipSuccess)
        return;
    std::cerr << statement << " returned " << hipGetErrorString(err)
              << "(" << err << ") at " << file << ":" << line << std::endl;
    exit(1);
}
```
20c7a41340a24d7ab4f28a4c761a5d9e7e81d94f.cu
```cpp
/*
 ============================================================================
 Name        : structoverflow.cu
 Author      :
 Version     :
 Copyright   : Your copyright notice
 Description : CUDA compute reciprocals
 ============================================================================
 */

#include <stdio.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>

using namespace std;

#define BUF_LEN 6

static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)

__device__ __noinline__ void normal() {
    printf("normal!\n");
}

__device__ __noinline__ void secret() {
    printf("Hello Admin!\n");
}

struct unsafe {
    unsigned long buf[BUF_LEN];
    void (*normal)();
};

__device__ __noinline__ void init(struct unsafe *data) {
    data->normal = normal;
}

__global__ void test_kernel(unsigned long *input, int len, int admin) {
    struct unsafe cu;
    init(&cu);
    for (int i = 0; i < len; i++)
        cu.buf[i] = input[i];
    cu.normal();
    secret();
    printf("%p", secret);
}

int main(void) {
    unsigned long input[10];
    unsigned long *dev_input;
    int len = 6;
    int admin = 0;
    for (int i = 0; i < 10; i++) {
        input[i] = 0xb2140; // this is secret() address
    }
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_input, 10*sizeof(unsigned long)));
    CUDA_CHECK_RETURN(cudaMemcpy(dev_input, input, 10*sizeof(unsigned long), cudaMemcpyHostToDevice));
    test_kernel<<<1,1>>>(dev_input, len, admin);
    cudaFree(dev_input);
    return 0;
}

/**
 * Check the return value of the CUDA runtime API call and exit
 * the application if the call has failed.
 */
static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) {
    if (err == cudaSuccess)
        return;
    std::cerr << statement << " returned " << cudaGetErrorString(err)
              << "(" << err << ") at " << file << ":" << line << std::endl;
    exit(1);
}
```
32aa2f19de818616f7f0555bc54b0ae17bc6637b.hip
```cpp
// !!! This is a file automatically generated by hipify!!!
/* Host code that implements a separable convolution filter of a
 * 2D signal with a gaussian kernel.
 *
 * Author: Naga Kandasamy
 * Date modified: May 26, 2020
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>

extern "C" void compute_gold(float *, float *, int, int, int);
extern "C" float *create_kernel(float, int);
void print_kernel(float *, int);
void print_matrix(float *, int, int);

/* Width of convolution kernel */
#define HALF_WIDTH 8
#define COEFF 10

/* Uncomment line below to spit out debug information */
// #define DEBUG

/* Include device code */
#include "separable_convolution_kernel.cu"

/* FIXME: Edit this function to compute the convolution on the device. */
void compute_on_device(float *gpu_result, float *matrix_c,
                       float *kernel, int num_cols,
                       int num_rows, int half_width)
{
    return;
}

int main(int argc, char **argv)
{
    if (argc < 3) {
        printf("Usage: %s num-rows num-columns\n", argv[0]);
        printf("num-rows: height of the matrix\n");
        printf("num-columns: width of the matrix\n");
        exit(EXIT_FAILURE);
    }

    int num_rows = atoi(argv[1]);
    int num_cols = atoi(argv[2]);

    /* Create input matrix */
    int num_elements = num_rows * num_cols;
    printf("Creating input matrix of %d x %d\n", num_rows, num_cols);
    float *matrix_a = (float *)malloc(sizeof(float) * num_elements);
    float *matrix_c = (float *)malloc(sizeof(float) * num_elements);

    srand(time(NULL));
    int i;
    for (i = 0; i < num_elements; i++) {
        matrix_a[i] = rand()/(float)RAND_MAX;
        matrix_c[i] = matrix_a[i]; /* Copy contents of matrix_a into matrix_c */
    }

    /* Create Gaussian kernel */
    float *gaussian_kernel = create_kernel((float)COEFF, HALF_WIDTH);
#ifdef DEBUG
    print_kernel(gaussian_kernel, HALF_WIDTH);
#endif

    /* Convolve matrix along rows and columns.
       The result is stored in matrix_a, thereby overwriting the
       original contents of matrix_a. */
    printf("\nConvolving the matrix on the CPU\n");
    compute_gold(matrix_a, gaussian_kernel, num_cols, num_rows, HALF_WIDTH);
#ifdef DEBUG
    print_matrix(matrix_a, num_cols, num_rows);
#endif

    float *gpu_result = (float *)malloc(sizeof(float) * num_elements);

    /* FIXME: Edit this function to complete the functionality on the GPU.
       The input matrix is matrix_c and the result must be stored in gpu_result. */
    printf("\nConvolving matrix on the GPU\n");
    compute_on_device(gpu_result, matrix_c, gaussian_kernel, num_cols, num_rows, HALF_WIDTH);

    printf("\nComparing CPU and GPU results\n");
    float sum_delta = 0, sum_ref = 0;
    for (i = 0; i < num_elements; i++) {
        sum_delta += fabsf(matrix_a[i] - gpu_result[i]);
        sum_ref += fabsf(matrix_a[i]);
    }

    float L1norm = sum_delta / sum_ref;
    float eps = 1e-6;
    printf("L1 norm: %E\n", L1norm);
    printf((L1norm < eps) ? "TEST PASSED\n" : "TEST FAILED\n");

    free(matrix_a);
    free(matrix_c);
    free(gpu_result);
    free(gaussian_kernel);

    exit(EXIT_SUCCESS);
}

/* Check for errors reported by the CUDA run time */
void check_for_error(char *msg)
{
    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        printf("CUDA ERROR: %s (%s)\n", msg, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return;
}

/* Print convolution kernel */
void print_kernel(float *kernel, int half_width)
{
    int i, j = 0;
    for (i = -half_width; i <= half_width; i++) {
        printf("%0.2f ", kernel[j]);
        j++;
    }
    printf("\n");
    return;
}

/* Print matrix */
void print_matrix(float *matrix, int num_cols, int num_rows)
{
    int i, j;
    float element;
    for (i = 0; i < num_rows; i++) {
        for (j = 0; j < num_cols; j++) {
            element = matrix[i * num_cols + j];
            printf("%0.2f ", element);
        }
        printf("\n");
    }
    return;
}
```
32aa2f19de818616f7f0555bc54b0ae17bc6637b.cu
```cpp
/* Host code that implements a separable convolution filter of a
 * 2D signal with a gaussian kernel.
 *
 * Author: Naga Kandasamy
 * Date modified: May 26, 2020
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>

extern "C" void compute_gold(float *, float *, int, int, int);
extern "C" float *create_kernel(float, int);
void print_kernel(float *, int);
void print_matrix(float *, int, int);

/* Width of convolution kernel */
#define HALF_WIDTH 8
#define COEFF 10

/* Uncomment line below to spit out debug information */
// #define DEBUG

/* Include device code */
#include "separable_convolution_kernel.cu"

/* FIXME: Edit this function to compute the convolution on the device. */
void compute_on_device(float *gpu_result, float *matrix_c,
                       float *kernel, int num_cols,
                       int num_rows, int half_width)
{
    return;
}

int main(int argc, char **argv)
{
    if (argc < 3) {
        printf("Usage: %s num-rows num-columns\n", argv[0]);
        printf("num-rows: height of the matrix\n");
        printf("num-columns: width of the matrix\n");
        exit(EXIT_FAILURE);
    }

    int num_rows = atoi(argv[1]);
    int num_cols = atoi(argv[2]);

    /* Create input matrix */
    int num_elements = num_rows * num_cols;
    printf("Creating input matrix of %d x %d\n", num_rows, num_cols);
    float *matrix_a = (float *)malloc(sizeof(float) * num_elements);
    float *matrix_c = (float *)malloc(sizeof(float) * num_elements);

    srand(time(NULL));
    int i;
    for (i = 0; i < num_elements; i++) {
        matrix_a[i] = rand()/(float)RAND_MAX;
        matrix_c[i] = matrix_a[i]; /* Copy contents of matrix_a into matrix_c */
    }

    /* Create Gaussian kernel */
    float *gaussian_kernel = create_kernel((float)COEFF, HALF_WIDTH);
#ifdef DEBUG
    print_kernel(gaussian_kernel, HALF_WIDTH);
#endif

    /* Convolve matrix along rows and columns.
       The result is stored in matrix_a, thereby overwriting the
       original contents of matrix_a. */
    printf("\nConvolving the matrix on the CPU\n");
    compute_gold(matrix_a, gaussian_kernel, num_cols, num_rows, HALF_WIDTH);
#ifdef DEBUG
    print_matrix(matrix_a, num_cols, num_rows);
#endif

    float *gpu_result = (float *)malloc(sizeof(float) * num_elements);

    /* FIXME: Edit this function to complete the functionality on the GPU.
       The input matrix is matrix_c and the result must be stored in gpu_result. */
    printf("\nConvolving matrix on the GPU\n");
    compute_on_device(gpu_result, matrix_c, gaussian_kernel, num_cols, num_rows, HALF_WIDTH);

    printf("\nComparing CPU and GPU results\n");
    float sum_delta = 0, sum_ref = 0;
    for (i = 0; i < num_elements; i++) {
        sum_delta += fabsf(matrix_a[i] - gpu_result[i]);
        sum_ref += fabsf(matrix_a[i]);
    }

    float L1norm = sum_delta / sum_ref;
    float eps = 1e-6;
    printf("L1 norm: %E\n", L1norm);
    printf((L1norm < eps) ? "TEST PASSED\n" : "TEST FAILED\n");

    free(matrix_a);
    free(matrix_c);
    free(gpu_result);
    free(gaussian_kernel);

    exit(EXIT_SUCCESS);
}

/* Check for errors reported by the CUDA run time */
void check_for_error(char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        printf("CUDA ERROR: %s (%s)\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return;
}

/* Print convolution kernel */
void print_kernel(float *kernel, int half_width)
{
    int i, j = 0;
    for (i = -half_width; i <= half_width; i++) {
        printf("%0.2f ", kernel[j]);
        j++;
    }
    printf("\n");
    return;
}

/* Print matrix */
void print_matrix(float *matrix, int num_cols, int num_rows)
{
    int i, j;
    float element;
    for (i = 0; i < num_rows; i++) {
        for (j = 0; j < num_cols; j++) {
            element = matrix[i * num_cols + j];
            printf("%0.2f ", element);
        }
        printf("\n");
    }
    return;
}
```
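Both copies of this file leave compute_on_device as a stub (the original is an assignment skeleton). Purely as an illustration of the shape a solution could take, the sketch below does a naive global-memory row pass followed by a column pass; the kernel names, launch geometry, and zero-padded boundary handling are assumptions and may not match the boundary treatment in compute_gold or in the provided separable_convolution_kernel.cu.

```cpp
#include <cuda_runtime.h>

#define THREAD_BLOCK 256  // assumed block size, one thread per output element

/* Convolve each row with the 1D kernel, treating out-of-range samples as zero. */
__global__ void convolve_rows(float *out, const float *in, const float *kernel,
                              int num_cols, int num_rows, int half_width)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_cols * num_rows) return;
    int row = idx / num_cols, col = idx % num_cols;
    float sum = 0.0f;
    for (int k = -half_width; k <= half_width; k++) {
        int c = col + k;
        if (c >= 0 && c < num_cols)
            sum += kernel[k + half_width] * in[row * num_cols + c];
    }
    out[idx] = sum;
}

/* Convolve each column with the 1D kernel, treating out-of-range samples as zero. */
__global__ void convolve_cols(float *out, const float *in, const float *kernel,
                              int num_cols, int num_rows, int half_width)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_cols * num_rows) return;
    int row = idx / num_cols, col = idx % num_cols;
    float sum = 0.0f;
    for (int k = -half_width; k <= half_width; k++) {
        int r = row + k;
        if (r >= 0 && r < num_rows)
            sum += kernel[k + half_width] * in[r * num_cols + col];
    }
    out[idx] = sum;
}

/* Hypothetical body for compute_on_device: copy in, row pass, column pass, copy out. */
void compute_on_device(float *gpu_result, float *matrix_c, float *kernel,
                       int num_cols, int num_rows, int half_width)
{
    int n = num_cols * num_rows;
    int ksize = 2 * half_width + 1;
    float *d_in, *d_tmp, *d_out, *d_kernel;
    cudaMalloc((void **)&d_in, n * sizeof(float));
    cudaMalloc((void **)&d_tmp, n * sizeof(float));
    cudaMalloc((void **)&d_out, n * sizeof(float));
    cudaMalloc((void **)&d_kernel, ksize * sizeof(float));
    cudaMemcpy(d_in, matrix_c, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, kernel, ksize * sizeof(float), cudaMemcpyHostToDevice);

    int grid = (n + THREAD_BLOCK - 1) / THREAD_BLOCK;
    convolve_rows<<<grid, THREAD_BLOCK>>>(d_tmp, d_in, d_kernel, num_cols, num_rows, half_width);
    convolve_cols<<<grid, THREAD_BLOCK>>>(d_out, d_tmp, d_kernel, num_cols, num_rows, half_width);

    cudaMemcpy(gpu_result, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in); cudaFree(d_tmp); cudaFree(d_out); cudaFree(d_kernel);
}
```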
74169f59efd296f721bfe474ad4d3aacfbb5381b.hip
```cpp
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>

#include "util.cuh"
#include "operator.cuh"
#include "rspmm.h"

namespace at {

// Memory & time efficient implementation of generalized spmm
// Much of the code is inspired by GE-SpMM
// https://github.com/hgyhungry/ge-spmm

namespace {
const int kCoarseningFactor = 2;
const int kThreadPerBlock = 256;
} // namespace anonymous

template <class scalar_t, class NaryOp, class BinaryOp>
__global__ void rspmm_forward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind,
                                       const int64_t *layer_ind, const scalar_t *value,
                                       const scalar_t *relation, const scalar_t *input,
                                       scalar_t *output,
                                       int64_t num_row, int64_t nnz, int64_t dim) {
    // for best optimization, the following code is compiled with constant warpSize
    assert(blockDim.x == warpSize);
    extern __shared__ int64_t buffer[];
    int64_t *col_ind_buf = buffer;
    int64_t *layer_ind_buf = buffer + blockDim.y * warpSize;
    scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize);
    col_ind_buf += threadIdx.y * warpSize;
    layer_ind_buf += threadIdx.y * warpSize;
    value_buf += threadIdx.y * warpSize;

    int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
    if (row >= num_row)
        return;

    int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
    int64_t ptr_start = row_ptr[row];
    int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
    scalar_t out[kCoarseningFactor];
#pragma unroll
    for (int64_t i = 0; i < kCoarseningFactor; i++)
        out[i] = NaryOp::zero;

    for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
        int64_t ptr = block_ptr + threadIdx.x;
        if (ptr < ptr_end) {
            col_ind_buf[threadIdx.x] = col_ind[ptr];
            layer_ind_buf[threadIdx.x] = layer_ind[ptr];
            value_buf[threadIdx.x] = value[ptr];
        }
        __syncwarp();

        int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
        for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
            int64_t col = col_ind_buf[offset_ptr];
            int64_t layer = layer_ind_buf[offset_ptr];
            scalar_t val = value_buf[offset_ptr];
#pragma unroll
            for (int64_t i = 0; i < kCoarseningFactor; i++) {
                int64_t d = d_start + i * warpSize;
                if (d >= dim)
                    break;
                scalar_t x = BinaryOp::forward(relation[layer * dim + d], input[col * dim + d]);
                scalar_t y = val * x;
                out[i] = NaryOp::forward(out[i], y);
            }
        }
        __syncwarp();
    }

#pragma unroll
    for (int64_t i = 0; i < kCoarseningFactor; i++) {
        int64_t d = d_start + i * warpSize;
        if (d >= dim)
            break;
        output[row * dim + d] = out[i];
    }
}

template <class scalar_t, class NaryOp, class BinaryOp>
__global__ void rspmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind,
                                        const int64_t *layer_ind, const scalar_t *value,
                                        const scalar_t *relation, const scalar_t *input,
                                        const scalar_t *output, const scalar_t *output_grad,
                                        scalar_t *value_grad, scalar_t *relation_grad,
                                        scalar_t *input_grad,
                                        int64_t num_row, int64_t nnz, int64_t dim) {
    // for best optimization, the following code is compiled with constant warpSize
    assert(blockDim.x == warpSize);
    extern __shared__ int64_t buffer[];
    int64_t *col_ind_buf = buffer;
    int64_t *layer_ind_buf = col_ind_buf + blockDim.y * warpSize;
    scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize);
    col_ind_buf += threadIdx.y * warpSize;
    layer_ind_buf += threadIdx.y * warpSize;
    value_buf += threadIdx.y * warpSize;

    int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
    if (row >= num_row)
        return;

    int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
    int64_t ptr_start = row_ptr[row];
    int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;

    for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
        int64_t ptr = block_ptr + threadIdx.x;
        if (ptr < ptr_end) {
            col_ind_buf[threadIdx.x] = col_ind[ptr];
            layer_ind_buf[threadIdx.x] = layer_ind[ptr];
            value_buf[threadIdx.x] = value[ptr];
        }
        __syncwarp();

        int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
        for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
            int64_t col = col_ind_buf[offset_ptr];
            int64_t layer = layer_ind_buf[offset_ptr];
            scalar_t val = value_buf[offset_ptr];
            scalar_t val_grad = 0;
#pragma unroll
            for (int64_t i = 0; i < kCoarseningFactor; i++) {
                int64_t d = d_start + i * warpSize;
                if (d >= dim)
                    break;
                scalar_t rel = relation[layer * dim + d];
                scalar_t in = input[col * dim + d];
                scalar_t out = output[row * dim + d];
                scalar_t out_grad = output_grad[row * dim + d];
                scalar_t x = BinaryOp::forward(rel, in);
                scalar_t y = val * x;
                scalar_t dx_drel = BinaryOp::backward_lhs(rel, in);
                scalar_t dx_din = BinaryOp::backward_rhs(rel, in);
                scalar_t dout_dy = NaryOp::backward(out, y);
                scalar_t dy_dval = x;
                scalar_t dy_dx = val;
                val_grad += out_grad * dout_dy * dy_dval;
                atomicAdd(&relation_grad[layer * dim + d], out_grad * dout_dy * dy_dx * dx_drel);
                atomicAdd(&input_grad[col * dim + d], out_grad * dout_dy * dy_dx * dx_din);
            }
            val_grad = warp_reduce(val_grad);
            if (threadIdx.x == 0)
                atomicAdd(&value_grad[block_ptr + offset_ptr], val_grad);
        }
        __syncwarp();
    }
}

// only relation & input require gradients
template <class scalar_t, class NaryOp, class BinaryOp>
__global__ void rspmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind,
                                        const int64_t *layer_ind, const scalar_t *value,
                                        const scalar_t *relation, const scalar_t *input,
                                        const scalar_t *output, const scalar_t *output_grad,
                                        scalar_t *relation_grad, scalar_t *input_grad,
                                        int64_t num_row, int64_t nnz, int64_t dim) {
    // for best optimization, the following code is compiled with constant warpSize
    assert(blockDim.x == warpSize);
    extern __shared__ int64_t buffer[];
    int64_t *col_ind_buf = buffer;
    int64_t *layer_ind_buf = col_ind_buf + blockDim.y * warpSize;
    scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize);
    col_ind_buf += threadIdx.y * warpSize;
    layer_ind_buf += threadIdx.y * warpSize;
    value_buf += threadIdx.y * warpSize;

    int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
    if (row >= num_row)
        return;

    int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
    int64_t ptr_start = row_ptr[row];
    int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;

    for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
        int64_t ptr = block_ptr + threadIdx.x;
        if (ptr < ptr_end) {
            col_ind_buf[threadIdx.x] = col_ind[ptr];
            layer_ind_buf[threadIdx.x] = layer_ind[ptr];
            value_buf[threadIdx.x] = value[ptr];
        }
        __syncwarp();

        int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
        for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
            int64_t col = col_ind_buf[offset_ptr];
            int64_t layer = layer_ind_buf[offset_ptr];
            scalar_t val = value_buf[offset_ptr];
#pragma unroll
            for (int64_t i = 0; i < kCoarseningFactor; i++) {
                int64_t d = d_start + i * warpSize;
                if (d >= dim)
                    break;
                scalar_t rel = relation[layer * dim + d];
                scalar_t in = input[col * dim + d];
                scalar_t out = output[row * dim + d];
                scalar_t out_grad = output_grad[row * dim + d];
                scalar_t x = BinaryOp::forward(rel, in);
                scalar_t y = val * x;
                scalar_t dx_drel = BinaryOp::backward_lhs(rel, in);
                scalar_t dx_din = BinaryOp::backward_rhs(rel, in);
                scalar_t dout_dy = NaryOp::backward(out, y);
                scalar_t dy_dx = val;
                atomicAdd(&relation_grad[layer * dim + d], out_grad * dout_dy * dy_dx * dx_drel);
                atomicAdd(&input_grad[col * dim + d], out_grad * dout_dy * dy_dx * dx_din);
            }
        }
        __syncwarp();
    }
}

template <template<class> class NaryOp, template<class> class BinaryOp>
Tensor rspmm_forward_cuda(const SparseTensor &sparse, const Tensor &relation_, const Tensor &input_) {
    constexpr const char *fn_name = "rspmm_forward_cuda";
    TensorArg sparse_arg(sparse, "sparse", 1), relation_arg(relation_, "relation", 2),
              input_arg(input_, "input", 3);
    rspmm_forward_check(fn_name, sparse_arg, relation_arg, input_arg);
    checkAllSameGPU(fn_name, {sparse_arg, relation_arg, input_arg});

    const Tensor relation = relation_.contiguous();
    const Tensor input = input_.contiguous();

    int64_t nnz = sparse._nnz();
    int64_t dim = input.size(1);
    int64_t num_row = sparse.size(0);
    Tensor output = at::empty({num_row, dim}, input.options());

    auto csr = coo2csr3d(sparse);
    Tensor row_ptr = std::get<0>(csr);
    Tensor col_ind = std::get<1>(csr);
    Tensor layer_ind = std::get<2>(csr);
    Tensor value = std::get<3>(csr);

    hipSetDevice(input.get_device());
    auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

    const int dim_per_block = 32; // warpSize
    const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1)
                              / (dim_per_block * kCoarseningFactor);
    const int row_per_block = kThreadPerBlock / dim_per_block;
    const int num_row_block = (num_row + row_per_block - 1) / row_per_block;

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
        const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t));
        hipLaunchKernelGGL((rspmm_forward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>),
            dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream,
            row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(),
            value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            num_row, nnz, dim
        );
    });

    return output;
}

template <template<class> class NaryOp, template<class> class BinaryOp>
std::tuple<SparseTensor, Tensor, Tensor> rspmm_backward_cuda(
        const SparseTensor &sparse, const Tensor &relation_, const Tensor &input_,
        const Tensor &output_, const Tensor &output_grad_) {
    constexpr const char *fn_name = "rspmm_backward_cuda";
    TensorArg sparse_arg(sparse, "sparse", 1), relation_arg(relation_, "relation", 2),
              input_arg(input_, "input", 3), output_arg(output_, "output", 4),
              output_grad_arg(output_grad_, "output_grad", 5);
    rspmm_backward_check(fn_name, sparse_arg, relation_arg, input_arg, output_arg, output_grad_arg);
    checkAllSameGPU(fn_name, {sparse_arg, relation_arg, input_arg, output_arg, output_grad_arg});

    const Tensor relation = relation_.contiguous();
    const Tensor input = input_.contiguous();
    const Tensor output = output_.contiguous();
    const Tensor output_grad = output_grad_.contiguous();

    int64_t nnz = sparse._nnz();
    int64_t dim = input.size(1);
    int64_t num_row = sparse.size(0);
    Tensor value_grad = at::zeros_like(sparse.values());
    Tensor relation_grad = at::zeros_like(relation);
    Tensor input_grad = at::zeros_like(input);
    SparseTensor sparse_grad = at::_sparse_coo_tensor_unsafe(sparse.indices(), value_grad, sparse.sizes());

    auto csr = coo2csr3d(sparse);
    Tensor row_ptr = std::get<0>(csr);
    Tensor col_ind = std::get<1>(csr);
    Tensor layer_ind = std::get<2>(csr);
    Tensor value = std::get<3>(csr);

    hipSetDevice(input.get_device());
    auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

    const int dim_per_block = 32; // warpSize
    const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1)
                              / (dim_per_block * kCoarseningFactor);
    const int row_per_block = kThreadPerBlock / dim_per_block;
    const int num_row_block = (num_row + row_per_block - 1) / row_per_block;

    if (sparse.requires_grad())
        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t));
            hipLaunchKernelGGL((rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>),
                dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream,
                row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(),
                value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(),
                output.data_ptr<scalar_t>(), output_grad.data_ptr<scalar_t>(),
                value_grad.data_ptr<scalar_t>(), relation_grad.data_ptr<scalar_t>(),
                input_grad.data_ptr<scalar_t>(),
                num_row, nnz, dim
            );
        });
    else
        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t));
            hipLaunchKernelGGL((rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>),
                dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream,
                row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(),
                value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(),
                output.data_ptr<scalar_t>(), output_grad.data_ptr<scalar_t>(),
                relation_grad.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(),
                num_row, nnz, dim
            );
        });

    return std::make_tuple(sparse_grad, relation_grad, input_grad);
}

#define DECLARE_FORWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \
    Tensor rspmm_##ADD##_##MUL##_forward_cuda( \
            const SparseTensor &sparse, const Tensor &relation, const Tensor &input) { \
        return rspmm_forward_cuda<NARYOP, BINARYOP>(sparse, relation, input); \
    }

#define DECLARE_BACKWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \
    std::tuple<SparseTensor, Tensor, Tensor> rspmm_##ADD##_##MUL##_backward_cuda( \
            const SparseTensor &sparse, const Tensor &relation, const Tensor &input, \
            const Tensor &output, const Tensor &output_grad) { \
        return rspmm_backward_cuda<NARYOP, BINARYOP>(sparse, relation, input, output, output_grad); \
    }

DECLARE_FORWARD_IMPL(add, mul, NaryAdd, BinaryMul)
DECLARE_BACKWARD_IMPL(add, mul, NaryAdd, BinaryMul)

DECLARE_FORWARD_IMPL(min, mul, NaryMin, BinaryMul)
DECLARE_BACKWARD_IMPL(min, mul, NaryMin, BinaryMul)

DECLARE_FORWARD_IMPL(max, mul, NaryMax, BinaryMul)
DECLARE_BACKWARD_IMPL(max, mul, NaryMax, BinaryMul)

DECLARE_FORWARD_IMPL(min, add, NaryMin, BinaryAdd)
DECLARE_BACKWARD_IMPL(min, add, NaryMin, BinaryAdd)

DECLARE_FORWARD_IMPL(max, add, NaryMax, BinaryAdd)
DECLARE_BACKWARD_IMPL(max, add, NaryMax, BinaryAdd)

} // namespace at
```
74169f59efd296f721bfe474ad4d3aacfbb5381b.cu
```cpp
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>

#include "util.cuh"
#include "operator.cuh"
#include "rspmm.h"

namespace at {

// Memory & time efficient implementation of generalized spmm
// Much of the code is inspired by GE-SpMM
// https://github.com/hgyhungry/ge-spmm

namespace {
const int kCoarseningFactor = 2;
const int kThreadPerBlock = 256;
} // namespace anonymous

template <class scalar_t, class NaryOp, class BinaryOp>
__global__ void rspmm_forward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind,
                                       const int64_t *layer_ind, const scalar_t *value,
                                       const scalar_t *relation, const scalar_t *input,
                                       scalar_t *output,
                                       int64_t num_row, int64_t nnz, int64_t dim) {
    // for best optimization, the following code is compiled with constant warpSize
    assert(blockDim.x == warpSize);
    extern __shared__ int64_t buffer[];
    int64_t *col_ind_buf = buffer;
    int64_t *layer_ind_buf = buffer + blockDim.y * warpSize;
    scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize);
    col_ind_buf += threadIdx.y * warpSize;
    layer_ind_buf += threadIdx.y * warpSize;
    value_buf += threadIdx.y * warpSize;

    int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
    if (row >= num_row)
        return;

    int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
    int64_t ptr_start = row_ptr[row];
    int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
    scalar_t out[kCoarseningFactor];
#pragma unroll
    for (int64_t i = 0; i < kCoarseningFactor; i++)
        out[i] = NaryOp::zero;

    for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
        int64_t ptr = block_ptr + threadIdx.x;
        if (ptr < ptr_end) {
            col_ind_buf[threadIdx.x] = col_ind[ptr];
            layer_ind_buf[threadIdx.x] = layer_ind[ptr];
            value_buf[threadIdx.x] = value[ptr];
        }
        __syncwarp();

        int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
        for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
            int64_t col = col_ind_buf[offset_ptr];
            int64_t layer = layer_ind_buf[offset_ptr];
            scalar_t val = value_buf[offset_ptr];
#pragma unroll
            for (int64_t i = 0; i < kCoarseningFactor; i++) {
                int64_t d = d_start + i * warpSize;
                if (d >= dim)
                    break;
                scalar_t x = BinaryOp::forward(relation[layer * dim + d], input[col * dim + d]);
                scalar_t y = val * x;
                out[i] = NaryOp::forward(out[i], y);
            }
        }
        __syncwarp();
    }

#pragma unroll
    for (int64_t i = 0; i < kCoarseningFactor; i++) {
        int64_t d = d_start + i * warpSize;
        if (d >= dim)
            break;
        output[row * dim + d] = out[i];
    }
}

template <class scalar_t, class NaryOp, class BinaryOp>
__global__ void rspmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind,
                                        const int64_t *layer_ind, const scalar_t *value,
                                        const scalar_t *relation, const scalar_t *input,
                                        const scalar_t *output, const scalar_t *output_grad,
                                        scalar_t *value_grad, scalar_t *relation_grad,
                                        scalar_t *input_grad,
                                        int64_t num_row, int64_t nnz, int64_t dim) {
    // for best optimization, the following code is compiled with constant warpSize
    assert(blockDim.x == warpSize);
    extern __shared__ int64_t buffer[];
    int64_t *col_ind_buf = buffer;
    int64_t *layer_ind_buf = col_ind_buf + blockDim.y * warpSize;
    scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize);
    col_ind_buf += threadIdx.y * warpSize;
    layer_ind_buf += threadIdx.y * warpSize;
    value_buf += threadIdx.y * warpSize;

    int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
    if (row >= num_row)
        return;

    int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
    int64_t ptr_start = row_ptr[row];
    int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;

    for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
        int64_t ptr = block_ptr + threadIdx.x;
        if (ptr < ptr_end) {
            col_ind_buf[threadIdx.x] = col_ind[ptr];
            layer_ind_buf[threadIdx.x] = layer_ind[ptr];
            value_buf[threadIdx.x] = value[ptr];
        }
        __syncwarp();

        int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
        for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
            int64_t col = col_ind_buf[offset_ptr];
            int64_t layer = layer_ind_buf[offset_ptr];
            scalar_t val = value_buf[offset_ptr];
            scalar_t val_grad = 0;
#pragma unroll
            for (int64_t i = 0; i < kCoarseningFactor; i++) {
                int64_t d = d_start + i * warpSize;
                if (d >= dim)
                    break;
                scalar_t rel = relation[layer * dim + d];
                scalar_t in = input[col * dim + d];
                scalar_t out = output[row * dim + d];
                scalar_t out_grad = output_grad[row * dim + d];
                scalar_t x = BinaryOp::forward(rel, in);
                scalar_t y = val * x;
                scalar_t dx_drel = BinaryOp::backward_lhs(rel, in);
                scalar_t dx_din = BinaryOp::backward_rhs(rel, in);
                scalar_t dout_dy = NaryOp::backward(out, y);
                scalar_t dy_dval = x;
                scalar_t dy_dx = val;
                val_grad += out_grad * dout_dy * dy_dval;
                atomicAdd(&relation_grad[layer * dim + d], out_grad * dout_dy * dy_dx * dx_drel);
                atomicAdd(&input_grad[col * dim + d], out_grad * dout_dy * dy_dx * dx_din);
            }
            val_grad = warp_reduce(val_grad);
            if (threadIdx.x == 0)
                atomicAdd(&value_grad[block_ptr + offset_ptr], val_grad);
        }
        __syncwarp();
    }
}

// only relation & input require gradients
template <class scalar_t, class NaryOp, class BinaryOp>
__global__ void rspmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind,
                                        const int64_t *layer_ind, const scalar_t *value,
                                        const scalar_t *relation, const scalar_t *input,
                                        const scalar_t *output, const scalar_t *output_grad,
                                        scalar_t *relation_grad, scalar_t *input_grad,
                                        int64_t num_row, int64_t nnz, int64_t dim) {
    // for best optimization, the following code is compiled with constant warpSize
    assert(blockDim.x == warpSize);
    extern __shared__ int64_t buffer[];
    int64_t *col_ind_buf = buffer;
    int64_t *layer_ind_buf = col_ind_buf + blockDim.y * warpSize;
    scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize);
    col_ind_buf += threadIdx.y * warpSize;
    layer_ind_buf += threadIdx.y * warpSize;
    value_buf += threadIdx.y * warpSize;

    int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
    if (row >= num_row)
        return;

    int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
    int64_t ptr_start = row_ptr[row];
    int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;

    for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
        int64_t ptr = block_ptr + threadIdx.x;
        if (ptr < ptr_end) {
            col_ind_buf[threadIdx.x] = col_ind[ptr];
            layer_ind_buf[threadIdx.x] = layer_ind[ptr];
            value_buf[threadIdx.x] = value[ptr];
        }
        __syncwarp();

        int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
        for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
            int64_t col = col_ind_buf[offset_ptr];
            int64_t layer = layer_ind_buf[offset_ptr];
            scalar_t val = value_buf[offset_ptr];
#pragma unroll
            for (int64_t i = 0; i < kCoarseningFactor; i++) {
                int64_t d = d_start + i * warpSize;
                if (d >= dim)
                    break;
                scalar_t rel = relation[layer * dim + d];
                scalar_t in = input[col * dim + d];
                scalar_t out = output[row * dim + d];
                scalar_t out_grad = output_grad[row * dim + d];
                scalar_t x = BinaryOp::forward(rel, in);
                scalar_t y = val * x;
                scalar_t dx_drel = BinaryOp::backward_lhs(rel, in);
                scalar_t dx_din = BinaryOp::backward_rhs(rel, in);
                scalar_t dout_dy = NaryOp::backward(out, y);
                scalar_t dy_dx = val;
                atomicAdd(&relation_grad[layer * dim + d], out_grad * dout_dy * dy_dx * dx_drel);
                atomicAdd(&input_grad[col * dim + d], out_grad * dout_dy * dy_dx * dx_din);
            }
        }
        __syncwarp();
    }
}

template <template<class> class NaryOp, template<class> class BinaryOp>
Tensor rspmm_forward_cuda(const SparseTensor &sparse, const Tensor &relation_, const Tensor &input_) {
    constexpr const char *fn_name = "rspmm_forward_cuda";
    TensorArg sparse_arg(sparse, "sparse", 1), relation_arg(relation_, "relation", 2),
              input_arg(input_, "input", 3);
    rspmm_forward_check(fn_name, sparse_arg, relation_arg, input_arg);
    checkAllSameGPU(fn_name, {sparse_arg, relation_arg, input_arg});

    const Tensor relation = relation_.contiguous();
    const Tensor input = input_.contiguous();

    int64_t nnz = sparse._nnz();
    int64_t dim = input.size(1);
    int64_t num_row = sparse.size(0);
    Tensor output = at::empty({num_row, dim}, input.options());

    auto csr = coo2csr3d(sparse);
    Tensor row_ptr = std::get<0>(csr);
    Tensor col_ind = std::get<1>(csr);
    Tensor layer_ind = std::get<2>(csr);
    Tensor value = std::get<3>(csr);

    cudaSetDevice(input.get_device());
    auto stream = at::cuda::getCurrentCUDAStream();

    const int dim_per_block = 32; // warpSize
    const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1)
                              / (dim_per_block * kCoarseningFactor);
    const int row_per_block = kThreadPerBlock / dim_per_block;
    const int num_row_block = (num_row + row_per_block - 1) / row_per_block;

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
        const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t));
        rspmm_forward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
            <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
                row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(),
                value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(),
                output.data_ptr<scalar_t>(),
                num_row, nnz, dim
            );
    });

    return output;
}

template <template<class> class NaryOp, template<class> class BinaryOp>
std::tuple<SparseTensor, Tensor, Tensor> rspmm_backward_cuda(
        const SparseTensor &sparse, const Tensor &relation_, const Tensor &input_,
        const Tensor &output_, const Tensor &output_grad_) {
    constexpr const char *fn_name = "rspmm_backward_cuda";
    TensorArg sparse_arg(sparse, "sparse", 1), relation_arg(relation_, "relation", 2),
              input_arg(input_, "input", 3), output_arg(output_, "output", 4),
              output_grad_arg(output_grad_, "output_grad", 5);
    rspmm_backward_check(fn_name, sparse_arg, relation_arg, input_arg, output_arg, output_grad_arg);
    checkAllSameGPU(fn_name, {sparse_arg, relation_arg, input_arg, output_arg, output_grad_arg});

    const Tensor relation = relation_.contiguous();
    const Tensor input = input_.contiguous();
    const Tensor output
```
= output_.contiguous(); const Tensor output_grad = output_grad_.contiguous(); int64_t nnz = sparse._nnz(); int64_t dim = input.size(1); int64_t num_row = sparse.size(0); Tensor value_grad = at::zeros_like(sparse.values()); Tensor relation_grad = at::zeros_like(relation); Tensor input_grad = at::zeros_like(input); SparseTensor sparse_grad = at::_sparse_coo_tensor_unsafe(sparse.indices(), value_grad, sparse.sizes()); auto csr = coo2csr3d(sparse); Tensor row_ptr = std::get<0>(csr); Tensor col_ind = std::get<1>(csr); Tensor layer_ind = std::get<2>(csr); Tensor value = std::get<3>(csr); cudaSetDevice(input.get_device()); auto stream = at::cuda::getCurrentCUDAStream(); const int dim_per_block = 32; // warpSize const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1) / (dim_per_block * kCoarseningFactor); const int row_per_block = kThreadPerBlock / dim_per_block; const int num_row_block = (num_row + row_per_block - 1) / row_per_block; if (sparse.requires_grad()) AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] { const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t)); rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>> <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>( row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(), value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), output_grad.data_ptr<scalar_t>(), value_grad.data_ptr<scalar_t>(), relation_grad.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), num_row, nnz, dim ); }); else AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] { const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t)); rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>> <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>( row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(), value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), output_grad.data_ptr<scalar_t>(), relation_grad.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), num_row, nnz, dim ); }); return std::make_tuple(sparse_grad, relation_grad, input_grad); } #define DECLARE_FORWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \ Tensor rspmm_##ADD##_##MUL##_forward_cuda( \ const SparseTensor &sparse, const Tensor &relation, const Tensor &input) { \ return rspmm_forward_cuda<NARYOP, BINARYOP>(sparse, relation, input); \ } #define DECLARE_BACKWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \ std::tuple<SparseTensor, Tensor, Tensor> rspmm_##ADD##_##MUL##_backward_cuda( \ const SparseTensor &sparse, const Tensor &relation, const Tensor &input, const Tensor &output, \ const Tensor &output_grad) { \ return rspmm_backward_cuda<NARYOP, BINARYOP>(sparse, relation, input, output, output_grad); \ } DECLARE_FORWARD_IMPL(add, mul, NaryAdd, BinaryMul) DECLARE_BACKWARD_IMPL(add, mul, NaryAdd, BinaryMul) DECLARE_FORWARD_IMPL(min, mul, NaryMin, BinaryMul) DECLARE_BACKWARD_IMPL(min, mul, NaryMin, BinaryMul) DECLARE_FORWARD_IMPL(max, mul, NaryMax, BinaryMul) DECLARE_BACKWARD_IMPL(max, mul, NaryMax, BinaryMul) DECLARE_FORWARD_IMPL(min, add, NaryMin, BinaryAdd) DECLARE_BACKWARD_IMPL(min, add, NaryMin, BinaryAdd) DECLARE_FORWARD_IMPL(max, add, NaryMax, BinaryAdd) DECLARE_BACKWARD_IMPL(max, add, NaryMax, BinaryAdd) } // namespace at
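Note on the rspmm backward kernels above: per-nonzero value gradients are combined across the warp with a warp_reduce helper that comes from util.cuh and is not shown in this file. A minimal sketch of such a helper, assuming all 32 lanes participate (the actual implementation in util.cuh may differ):

template <class scalar_t>
__device__ scalar_t warp_reduce(scalar_t value) {
    // Shuffle-based tree reduction: after log2(32) = 5 halving steps,
    // lane 0 holds the sum of all 32 lanes' inputs.
    for (int offset = 16; offset > 0; offset >>= 1)
        value += __shfl_down_sync(0xFFFFFFFF, value, offset, 32);
    return value;
}

Only lane 0 ends up with the full sum, which is why the kernel gates the atomicAdd into value_grad behind threadIdx.x == 0.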
55e24a65c648c0d37e36b7822a6b3386374378cd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ unsigned int getGid3d3d(){
	int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
	int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
		+ (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
	return threadId;
}

__device__ double2 pow(double2 a, int b){
	double r = sqrt(a.x*a.x + a.y*a.y);
	double theta = atan(a.y / a.x);
	return{pow(r,b)*cos(b*theta), pow(r,b)*sin(b*theta)};
}

__global__ void ktorus_wfc(double *x, double *y, double *z, double *items, double winding, double *phi, double2 *wfc){
	int gid = getGid3d3d();
	int xid = blockDim.x*blockIdx.x + threadIdx.x;
	int yid = blockDim.y*blockIdx.y + threadIdx.y;
	int zid = blockDim.z*blockIdx.z + threadIdx.z;
	double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6])
		+ (y[yid] - items[7]) * (y[yid] - items[7])) - 0.5*items[0];
	wfc[gid].x = exp(-( pow((rad)/(items[14]*items[15]*0.5),2) + pow((z[zid])/(items[14]*items[17]*0.5),2) ) );
	wfc[gid].y = 0.0;
}
55e24a65c648c0d37e36b7822a6b3386374378cd.cu
#include "includes.h" __device__ unsigned int getGid3d3d(){ int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x; return threadId; } __device__ double2 pow(double2 a, int b){ double r = sqrt(a.x*a.x + a.y*a.y); double theta = atan(a.y / a.x); return{pow(r,b)*cos(b*theta),pow(r,b)*sin(b*theta)}; } __global__ void ktorus_wfc(double *x, double *y, double *z, double *items, double winding, double *phi, double2 *wfc){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6]) + (y[yid] - items[7]) * (y[yid] - items[7])) - 0.5*items[0]; wfc[gid].x = exp(-( pow((rad)/(items[14]*items[15]*0.5),2) + pow((z[zid])/(items[14]*items[17]*0.5),2) ) ); wfc[gid].y = 0.0; }
c9222bf6391ea375ecada6a8a9e890f7a5b52a6d.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * bSumSquares.cu
 *
 * Copyright 2021 mike <mike@fedora33>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 *
 */
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>

__global__ void kernel(ulong* d_squares, const ulong n_squares, ulong* d_results, ulong N) {
	ulong i = threadIdx.x + (blockIdx.x * blockDim.x);
	if(i < N) {
		// scan in reverse the squares array
		// save first square which divides i in results[i]
		if(i > 3) {
			for(int x = n_squares-1; x > 0; x -= 1) {
				if((i % d_squares[x]) == 0) {
					d_results[i] = d_squares[x];
					break;
				}
			} // for...
		} else {
			d_results[i] = i;
		}
	} //
}

int main(int argc, char **argv)
{
	hipError_t error_id;
	ulong N = 1024*1024; // sum perfect square divisors up to this value

	// Allocate and set the host 'perfect squares' array
	ulong root_max = (ulong)floor(sqrt((double)N));
	const ulong n_squares = root_max + 1;
	ulong h_squares[n_squares];
	for(int x = 0; x < n_squares; x += 1) h_squares[x] = x*x;

	// Allocate memory on device for 'squares'
	ulong *d_squares;
	error_id = hipMalloc((void**)&d_squares, sizeof(ulong)*n_squares);
	if(error_id != hipSuccess) {
		printf("hipMalloc squares failed with %d\n", error_id);
		exit(1);
	}

	// Copy squares to device
	error_id = hipMemcpy(d_squares, h_squares, sizeof(ulong)*n_squares, hipMemcpyHostToDevice);
	if(error_id != hipSuccess) {
		printf("hipMemcpy squares to device failed with %d\n", error_id);
		exit(1);
	}

	// Allocate memory on host and device for 2 pages of N results
	ulong *results_0 = NULL, *results_1 = NULL;
	error_id = hipMallocManaged((void**)&results_0, sizeof(ulong)*(N+1));
	if(error_id != hipSuccess) {
		printf("hipMallocManaged (0) failed with %d\n", error_id);
		exit(1);
	}
	error_id = hipMallocManaged((void**)&results_1, sizeof(ulong)*(N+1));
	if(error_id != hipSuccess) {
		printf("hipMallocManaged (1) results failed with %d\n", error_id);
		exit(1);
	}

	// Set variables
	ulong total = 0;
	for(ulong x = 0; x <= N; ++x) { results_0[x] = 0; } // clear results
	ulong *pagePtr = results_1; // new results go here
	ulong pageIdx = 0; // page counter

	// set configuration
	dim3 thread_size(1024,1,1);
	dim3 block_size(1024,1,1);

	// launch kernel
	// kernel<<<grid_size, block_size>>>(d_squares, n_squares, d_results, (N+1));

	// Wait for device to finish?
	//hipDeviceSynchronize();

	// Print results array
	// for(int x = 0; x < N+1; ++x) printf("%d:%ld ", x, h_results[x]);
	// printf("\n");

	// Cleanup
	hipFree(d_squares);
	hipFree(results_0);
	hipFree(results_1);
	return 0;
}
c9222bf6391ea375ecada6a8a9e890f7a5b52a6d.cu
/*
 * bSumSquares.cu
 *
 * Copyright 2021 mike <mike@fedora33>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 *
 */
#include <stdio.h>
#include <math.h>
#include <cuda.h>

__global__ void kernel(ulong* d_squares, const ulong n_squares, ulong* d_results, ulong N) {
	ulong i = threadIdx.x + (blockIdx.x * blockDim.x);
	if(i < N) {
		// scan in reverse the squares array
		// save first square which divides i in results[i]
		if(i > 3) {
			for(int x = n_squares-1; x > 0; x -= 1) {
				if((i % d_squares[x]) == 0) {
					d_results[i] = d_squares[x];
					break;
				}
			} // for...
		} else {
			d_results[i] = i;
		}
	} //
}

int main(int argc, char **argv)
{
	cudaError_t error_id;
	ulong N = 1024*1024; // sum perfect square divisors up to this value

	// Allocate and set the host 'perfect squares' array
	ulong root_max = (ulong)floor(sqrt((double)N));
	const ulong n_squares = root_max + 1;
	ulong h_squares[n_squares];
	for(int x = 0; x < n_squares; x += 1) h_squares[x] = x*x;

	// Allocate memory on device for 'squares'
	ulong *d_squares;
	error_id = cudaMalloc((void**)&d_squares, sizeof(ulong)*n_squares);
	if(error_id != cudaSuccess) {
		printf("cudaMalloc squares failed with %d\n", error_id);
		exit(1);
	}

	// Copy squares to device
	error_id = cudaMemcpy(d_squares, h_squares, sizeof(ulong)*n_squares, cudaMemcpyHostToDevice);
	if(error_id != cudaSuccess) {
		printf("cudaMemcpy squares to device failed with %d\n", error_id);
		exit(1);
	}

	// Allocate memory on host and device for 2 pages of N results
	ulong *results_0 = NULL, *results_1 = NULL;
	error_id = cudaMallocManaged((void**)&results_0, sizeof(ulong)*(N+1));
	if(error_id != cudaSuccess) {
		printf("cudaMallocManaged (0) failed with %d\n", error_id);
		exit(1);
	}
	error_id = cudaMallocManaged((void**)&results_1, sizeof(ulong)*(N+1));
	if(error_id != cudaSuccess) {
		printf("cudaMallocManaged (1) results failed with %d\n", error_id);
		exit(1);
	}

	// Set variables
	ulong total = 0;
	for(ulong x = 0; x <= N; ++x) { results_0[x] = 0; } // clear results
	ulong *pagePtr = results_1; // new results go here
	ulong pageIdx = 0; // page counter

	// set configuration
	dim3 thread_size(1024,1,1);
	dim3 block_size(1024,1,1);

	// launch kernel
	// kernel<<<grid_size, block_size>>>(d_squares, n_squares, d_results, (N+1));

	// Wait for device to finish?
	//cudaDeviceSynchronize();

	// Print results array
	// for(int x = 0; x < N+1; ++x) printf("%d:%ld ", x, h_results[x]);
	// printf("\n");

	// Cleanup
	cudaFree(d_squares);
	cudaFree(results_0);
	cudaFree(results_1);
	return 0;
}
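In both versions of bSumSquares the launch is commented out and refers to grid_size and d_results, which are never declared. A plausible completion, written here in the CUDA form (the .hip twin would use the hip-prefixed calls), assuming the managed buffer results_0 is the intended output:

const ulong threads = 1024;
const ulong blocks = (N + 1 + threads - 1) / threads;   // cover indices 0..N
kernel<<<(unsigned)blocks, (unsigned)threads>>>(d_squares, n_squares, results_0, N + 1);
cudaDeviceSynchronize();   // managed memory: synchronize before the host reads results_0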
9b8c43e12dea68030ff4dbc0a95c466a53c8e799.hip
// !!! This is a file automatically generated by hipify!!! # include <stdlib.h> # include <stdio.h> # include <math.h> # include <time.h> # include <omp.h> #include "common.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> //#define N 1024*1024*12 #define BLOCK_SIZE 256 #define MAX_BLOCKS 65535 #define MAX_MEMORY BLOCK_SIZE * MAX_BLOCKS double f(double x) { double pi = 3.141592653589793; double value; value = 50.0 / (pi * (2500.0 * x * x + 1.0)); return value; } __device__ double fDev(double x) { double pi = 3.141592653589793; double value; value = 50.0 / (pi * (2500.0 * x * x + 1.0)); return value; } int sequential(int argc, char *argv[], Result_Vect *result) { double a; double b; double error; int i; int n; double total_q, total_t, total_s; double wtime_q, wtime_t, wtime_s; double x; double h; printf("\n\nSEQUENTIAL\n"); result->time = 0; if (argc != 4) { n = 10000000; a = 0.0; b = 10.0; } else { n = atoi(argv[1]); a = atoi(argv[2]); b = atoi(argv[3]); } printf("\n"); printf("QUAD:\n"); printf(" Estimate the integral of f(x) from A to B.\n"); printf(" f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n"); printf("\n"); printf(" A = %f\n", a); printf(" B = %f\n", b); printf(" N = %d\n", n); // Quadratic rule wtime_q = omp_get_wtime(); total_q = 0.0; for (i = 0; i < n; i++) { x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); total_q = total_q + f(x); } wtime_q = omp_get_wtime() - wtime_q; total_q = (b - a) * total_q / (double)n; result->time += wtime_q; result->value[0] = total_q; // Trapezoidal rule h = (b - a) / n; wtime_t = omp_get_wtime(); total_t = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i > 0 && i < n - 1) total_t = total_t + f(x); else total_t = total_t + 0.5 * f(x); } total_t = h * total_t; wtime_t = omp_get_wtime() - wtime_t; result->time += wtime_t; result->value[1] = total_t; // Simpson 1/3 rule h = (b - a) / n; wtime_s = omp_get_wtime(); total_s = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i == 0 || i == n - 1) total_s = total_s + f(x); else if (i % 2 == 1) total_s = total_s + 4 * f(x); else total_s = total_s + 2 * f(x); } total_s = h / 3 * total_s; wtime_s = omp_get_wtime() - wtime_s; result->time += wtime_s; result->value[2] = total_s; printf("\n"); printf(" Estimate quadratic rule = %24.16f\n", total_q); printf(" Estimate trapezoidal rule = %24.16f\n", total_t); printf(" Estimate Simpson 1/3 rule = %24.16f\n", total_s); printf(" Time quadratic rule = %f\n", wtime_q); printf(" Time trapezoidal rule = %f\n", wtime_t); printf(" Time Simpson 1/3 rule = %f\n", wtime_s); printf("\n"); printf(" Normal end of execution.\n"); printf("\n"); return 0; } /////////////////// parallel // Simple reduction kernel __global__ void reductionSumKernel(double* devA, double* blockResults, int n) { extern __shared__ double sharedData[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Load block in the shared memory if (i < n) sharedData[tid] = devA[i]; else sharedData[tid] = 0; __syncthreads(); // Do reduction in shared memory for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sharedData[tid] += sharedData[tid + s]; } __syncthreads(); } // Write result for this block to global memory if (tid == 0) blockResults[blockIdx.x] = sharedData[0]; } double sumReduction(double* devA, int n) { double gpuSum = 0; int numBlocks = 0; double *devBlockRes; // Run kernel several times until the work is done 
numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; //printf("this is numblocks %d\n", numBlocks); hipMalloc((void **)&devBlockRes, numBlocks * sizeof(double)); hipLaunchKernelGGL(( reductionSumKernel) , dim3(numBlocks), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(double) , 0, devA, devBlockRes, n); while (numBlocks > 1) { n = numBlocks; numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( reductionSumKernel) , dim3(numBlocks), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(double) , 0, devBlockRes, devBlockRes, n); } // Copy back the results hipMemcpy(&gpuSum, devBlockRes, sizeof(double), hipMemcpyDeviceToHost); hipFree(devBlockRes); return gpuSum; } __global__ void compute_kernel_quad_big(double *devA, double a, double b, int n, int offset, int nSize) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x + offset; while (i < nSize + offset) { double x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); devA[i-offset] = fDev(x); i += blockDim.x * gridDim.x; } } __global__ void compute_kernel_trapezoidal_big(double *devA, double a, double b, int n, int offset, int nSize) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x + offset; while (i < nSize + offset) { double h = (b - a) / n; double x = a + i * h; if (i > 0 && i < n - 1) devA[i - offset] = fDev(x); else devA[i - offset] = 0.5 * fDev(x); i += blockDim.x * gridDim.x; } } __global__ void compute_kernel_simpson_big(double *devA, double a, double b, int n, int offset, int nSize) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x + offset; if (i < nSize + offset) { double h = (b - a) / n; double x = a + i * h; if (i == 0 || i == n - 1) devA[i-offset] = fDev(x); else if (i % 2 == 1) devA[i-offset] = 4 * fDev(x); else devA[i-offset] = 2 * fDev(x); i += blockDim.x * gridDim.x; } } //__global__ void compute_kernel_quad(double *devA, double a, double b, int n) { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // double x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); // devA[i] = fDev(x); // } //} //__global__ void compute_kernel_trapezoidal(double *devA, double a, double b, int n) { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // double h = (b - a) / n; // // double x = a + i * h; // if (i > 0 && i < n - 1) // devA[i] = fDev(x); // else // devA[i] = 0.5 * fDev(x); // } //} //double parallel_trapezoidal(double a, double b, int n) { // double *devA; // // hipMalloc((void **)&devA, n * sizeof(double)); // //hipMemcpy(devA, A, size, hipMemcpyHostToDevice); // // int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; // // compute_kernel_trapezoidal << < numBlocks, BLOCK_SIZE >> > (devA, a, b, n); // double total_q = sumReduction(devA, n); // // hipFree(devA); // // return total_q; //} //__global__ void compute_kernel_simpson(double *devA, double a, double b, int n) { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // double h = (b - a) / n; // // double x = a + i * h; // if (i == 0 || i == n - 1) // devA[i] = fDev(x); // else if (i % 2 == 1) // devA[i] = 4 * fDev(x); // else // devA[i] = 2 * fDev(x); // } //} //double parallel_simpson(double a, double b, int n) { // double *devA; // // hipMalloc((void **)&devA, n * sizeof(double)); // //hipMemcpy(devA, A, size, hipMemcpyHostToDevice); // // int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; // // compute_kernel_simpson << < numBlocks, BLOCK_SIZE >> > (devA, a, b, n); // double total_q = sumReduction(devA, n); // // hipFree(devA); // // return total_q; //} double 
parallel_compute(double a, double b, int n, void (*kernel)(double*,double,double,int,int,int)) { double *devA; double total_q = 0; for (int ni = 0; ni < n; ni += MAX_MEMORY) { int nSize = MAX_MEMORY; if (n - ni < MAX_MEMORY) nSize = n - ni; hipMalloc((void **)&devA, nSize * sizeof(double)); if (hipSuccess != hipGetLastError()) { printf("couldnt allocate %d doubles\n", nSize); break; } int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; if (numBlocks > MAX_BLOCKS) numBlocks = MAX_BLOCKS; kernel << < numBlocks, BLOCK_SIZE >> > (devA, a, b, n, ni, nSize); if (hipSuccess != hipGetLastError()) { printf("error computing\n"); break; } total_q += sumReduction(devA, nSize); if (hipSuccess != hipGetLastError()) { printf("error reducting\n"); break; } hipFree(devA); } return total_q; } int parallel(int argc, char *argv[], Result_Vect *result) { double a; double b; double error; int i; int n; double total_q, total_t, total_s; double wtime_q, wtime_t, wtime_s; double x; double h; printf("\n\nPARALLEL\n"); result->time = 0; if (argc != 4) { n = 10000000; a = 0.0; b = 10.0; } else { n = atoi(argv[1]); a = atoi(argv[2]); b = atoi(argv[3]); } printf("\n"); printf("QUAD:\n"); printf(" Estimate the integral of f(x) from A to B.\n"); printf(" f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n"); printf("\n"); printf(" A = %f\n", a); printf(" B = %f\n", b); printf(" N = %d\n", n); // Quadratic rule wtime_q = omp_get_wtime(); /*total_q = 0.0; for (i = 0; i < n; i++) { x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); total_q = total_q + f(x); }*/ total_q = parallel_compute(a, b, n, compute_kernel_quad_big); total_q = (b - a) * total_q / (double)n; wtime_q = omp_get_wtime() - wtime_q; result->time += wtime_q; result->value[0] = total_q; // Trapezoidal rule h = (b - a) / n; wtime_t = omp_get_wtime(); /*total_t = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i > 0 && i < n - 1) total_t = total_t + f(x); else total_t = total_t + 0.5 * f(x); }*/ /*total_t = parallel_trapezoidal(a, b, n);*/ total_t = parallel_compute(a, b, n, compute_kernel_trapezoidal_big); total_t = h * total_t; wtime_t = omp_get_wtime() - wtime_t; result->time += wtime_t; result->value[1] = total_t; // Simpson 1/3 rule h = (b - a) / n; wtime_s = omp_get_wtime(); /* total_s = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i == 0 || i == n - 1) total_s = total_s + f(x); else if (i % 2 == 1) total_s = total_s + 4 * f(x); else total_s = total_s + 2 * f(x); }*/ total_s = parallel_compute(a, b, n, compute_kernel_simpson_big); /*total_s = parallel_simpson(a, b, n);*/ total_s = h / 3 * total_s; wtime_s = omp_get_wtime() - wtime_s; result->time += wtime_s; result->value[2] = total_s; printf("\n"); printf(" Estimate quadratic rule = %24.16f\n", total_q); printf(" Estimate trapezoidal rule = %24.16f\n", total_t); printf(" Estimate Simpson 1/3 rule = %24.16f\n", total_s); printf(" Time quadratic rule = %f\n", wtime_q); printf(" Time trapezoidal rule = %f\n", wtime_t); printf(" Time Simpson 1/3 rule = %f\n", wtime_s); printf("\n"); printf(" Normal end of execution.\n"); printf("\n"); return 0; } int main(int argc, char *argv[]) { //double sequential_result, parallel_result, sequential_time, parallel_time; Result_Vect seq_result; Result_Vect par_result; seq_result.val_size = 3; seq_result.value = (double*)malloc(3 * sizeof(double)); par_result.val_size = 3; par_result.value = (double*)malloc(3 * sizeof(double)); /*for (int i = 1; ; i <<= 1) { double *nekid; hipMalloc((void **)&nekid, i * sizeof(double)); if (hipSuccess != hipGetLastError()) { 
printf("couldnt allocate %d doubles\n", i); break; } else { printf("allocated %d doubles\n", i); hipFree(nekid); } }*/ sequential(argc, argv, &seq_result); parallel(argc, argv, &par_result); compare_and_print_vect(seq_result, par_result, "Numeric integration"); }
9b8c43e12dea68030ff4dbc0a95c466a53c8e799.cu
# include <stdlib.h> # include <stdio.h> # include <math.h> # include <time.h> # include <omp.h> #include "common.h" #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <device_functions.h> //#define N 1024*1024*12 #define BLOCK_SIZE 256 #define MAX_BLOCKS 65535 #define MAX_MEMORY BLOCK_SIZE * MAX_BLOCKS double f(double x) { double pi = 3.141592653589793; double value; value = 50.0 / (pi * (2500.0 * x * x + 1.0)); return value; } __device__ double fDev(double x) { double pi = 3.141592653589793; double value; value = 50.0 / (pi * (2500.0 * x * x + 1.0)); return value; } int sequential(int argc, char *argv[], Result_Vect *result) { double a; double b; double error; int i; int n; double total_q, total_t, total_s; double wtime_q, wtime_t, wtime_s; double x; double h; printf("\n\nSEQUENTIAL\n"); result->time = 0; if (argc != 4) { n = 10000000; a = 0.0; b = 10.0; } else { n = atoi(argv[1]); a = atoi(argv[2]); b = atoi(argv[3]); } printf("\n"); printf("QUAD:\n"); printf(" Estimate the integral of f(x) from A to B.\n"); printf(" f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n"); printf("\n"); printf(" A = %f\n", a); printf(" B = %f\n", b); printf(" N = %d\n", n); // Quadratic rule wtime_q = omp_get_wtime(); total_q = 0.0; for (i = 0; i < n; i++) { x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); total_q = total_q + f(x); } wtime_q = omp_get_wtime() - wtime_q; total_q = (b - a) * total_q / (double)n; result->time += wtime_q; result->value[0] = total_q; // Trapezoidal rule h = (b - a) / n; wtime_t = omp_get_wtime(); total_t = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i > 0 && i < n - 1) total_t = total_t + f(x); else total_t = total_t + 0.5 * f(x); } total_t = h * total_t; wtime_t = omp_get_wtime() - wtime_t; result->time += wtime_t; result->value[1] = total_t; // Simpson 1/3 rule h = (b - a) / n; wtime_s = omp_get_wtime(); total_s = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i == 0 || i == n - 1) total_s = total_s + f(x); else if (i % 2 == 1) total_s = total_s + 4 * f(x); else total_s = total_s + 2 * f(x); } total_s = h / 3 * total_s; wtime_s = omp_get_wtime() - wtime_s; result->time += wtime_s; result->value[2] = total_s; printf("\n"); printf(" Estimate quadratic rule = %24.16f\n", total_q); printf(" Estimate trapezoidal rule = %24.16f\n", total_t); printf(" Estimate Simpson 1/3 rule = %24.16f\n", total_s); printf(" Time quadratic rule = %f\n", wtime_q); printf(" Time trapezoidal rule = %f\n", wtime_t); printf(" Time Simpson 1/3 rule = %f\n", wtime_s); printf("\n"); printf(" Normal end of execution.\n"); printf("\n"); return 0; } /////////////////// parallel // Simple reduction kernel __global__ void reductionSumKernel(double* devA, double* blockResults, int n) { extern __shared__ double sharedData[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Load block in the shared memory if (i < n) sharedData[tid] = devA[i]; else sharedData[tid] = 0; __syncthreads(); // Do reduction in shared memory for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sharedData[tid] += sharedData[tid + s]; } __syncthreads(); } // Write result for this block to global memory if (tid == 0) blockResults[blockIdx.x] = sharedData[0]; } double sumReduction(double* devA, int n) { double gpuSum = 0; int numBlocks = 0; double *devBlockRes; // Run kernel several times until the work is done numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; //printf("this is numblocks %d\n", 
numBlocks); cudaMalloc((void **)&devBlockRes, numBlocks * sizeof(double)); reductionSumKernel <<< numBlocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(double) >>>(devA, devBlockRes, n); while (numBlocks > 1) { n = numBlocks; numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; reductionSumKernel <<< numBlocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(double) >>>(devBlockRes, devBlockRes, n); } // Copy back the results cudaMemcpy(&gpuSum, devBlockRes, sizeof(double), cudaMemcpyDeviceToHost); cudaFree(devBlockRes); return gpuSum; } __global__ void compute_kernel_quad_big(double *devA, double a, double b, int n, int offset, int nSize) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x + offset; while (i < nSize + offset) { double x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); devA[i-offset] = fDev(x); i += blockDim.x * gridDim.x; } } __global__ void compute_kernel_trapezoidal_big(double *devA, double a, double b, int n, int offset, int nSize) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x + offset; while (i < nSize + offset) { double h = (b - a) / n; double x = a + i * h; if (i > 0 && i < n - 1) devA[i - offset] = fDev(x); else devA[i - offset] = 0.5 * fDev(x); i += blockDim.x * gridDim.x; } } __global__ void compute_kernel_simpson_big(double *devA, double a, double b, int n, int offset, int nSize) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x + offset; if (i < nSize + offset) { double h = (b - a) / n; double x = a + i * h; if (i == 0 || i == n - 1) devA[i-offset] = fDev(x); else if (i % 2 == 1) devA[i-offset] = 4 * fDev(x); else devA[i-offset] = 2 * fDev(x); i += blockDim.x * gridDim.x; } } //__global__ void compute_kernel_quad(double *devA, double a, double b, int n) { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // double x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); // devA[i] = fDev(x); // } //} //__global__ void compute_kernel_trapezoidal(double *devA, double a, double b, int n) { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // double h = (b - a) / n; // // double x = a + i * h; // if (i > 0 && i < n - 1) // devA[i] = fDev(x); // else // devA[i] = 0.5 * fDev(x); // } //} //double parallel_trapezoidal(double a, double b, int n) { // double *devA; // // cudaMalloc((void **)&devA, n * sizeof(double)); // //cudaMemcpy(devA, A, size, cudaMemcpyHostToDevice); // // int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; // // compute_kernel_trapezoidal << < numBlocks, BLOCK_SIZE >> > (devA, a, b, n); // double total_q = sumReduction(devA, n); // // cudaFree(devA); // // return total_q; //} //__global__ void compute_kernel_simpson(double *devA, double a, double b, int n) { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // double h = (b - a) / n; // // double x = a + i * h; // if (i == 0 || i == n - 1) // devA[i] = fDev(x); // else if (i % 2 == 1) // devA[i] = 4 * fDev(x); // else // devA[i] = 2 * fDev(x); // } //} //double parallel_simpson(double a, double b, int n) { // double *devA; // // cudaMalloc((void **)&devA, n * sizeof(double)); // //cudaMemcpy(devA, A, size, cudaMemcpyHostToDevice); // // int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; // // compute_kernel_simpson << < numBlocks, BLOCK_SIZE >> > (devA, a, b, n); // double total_q = sumReduction(devA, n); // // cudaFree(devA); // // return total_q; //} double parallel_compute(double a, double b, int n, void (*kernel)(double*,double,double,int,int,int)) { double *devA; double total_q = 0; for (int ni = 0; ni < 
n; ni += MAX_MEMORY) { int nSize = MAX_MEMORY; if (n - ni < MAX_MEMORY) nSize = n - ni; cudaMalloc((void **)&devA, nSize * sizeof(double)); if (cudaSuccess != cudaGetLastError()) { printf("couldnt allocate %d doubles\n", nSize); break; } int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; if (numBlocks > MAX_BLOCKS) numBlocks = MAX_BLOCKS; kernel << < numBlocks, BLOCK_SIZE >> > (devA, a, b, n, ni, nSize); if (cudaSuccess != cudaGetLastError()) { printf("error computing\n"); break; } total_q += sumReduction(devA, nSize); if (cudaSuccess != cudaGetLastError()) { printf("error reducting\n"); break; } cudaFree(devA); } return total_q; } int parallel(int argc, char *argv[], Result_Vect *result) { double a; double b; double error; int i; int n; double total_q, total_t, total_s; double wtime_q, wtime_t, wtime_s; double x; double h; printf("\n\nPARALLEL\n"); result->time = 0; if (argc != 4) { n = 10000000; a = 0.0; b = 10.0; } else { n = atoi(argv[1]); a = atoi(argv[2]); b = atoi(argv[3]); } printf("\n"); printf("QUAD:\n"); printf(" Estimate the integral of f(x) from A to B.\n"); printf(" f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n"); printf("\n"); printf(" A = %f\n", a); printf(" B = %f\n", b); printf(" N = %d\n", n); // Quadratic rule wtime_q = omp_get_wtime(); /*total_q = 0.0; for (i = 0; i < n; i++) { x = ((double)(n - i - 1) * a + (double)(i)* b) / (double)(n - 1); total_q = total_q + f(x); }*/ total_q = parallel_compute(a, b, n, compute_kernel_quad_big); total_q = (b - a) * total_q / (double)n; wtime_q = omp_get_wtime() - wtime_q; result->time += wtime_q; result->value[0] = total_q; // Trapezoidal rule h = (b - a) / n; wtime_t = omp_get_wtime(); /*total_t = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i > 0 && i < n - 1) total_t = total_t + f(x); else total_t = total_t + 0.5 * f(x); }*/ /*total_t = parallel_trapezoidal(a, b, n);*/ total_t = parallel_compute(a, b, n, compute_kernel_trapezoidal_big); total_t = h * total_t; wtime_t = omp_get_wtime() - wtime_t; result->time += wtime_t; result->value[1] = total_t; // Simpson 1/3 rule h = (b - a) / n; wtime_s = omp_get_wtime(); /* total_s = 0.0; for (i = 0; i < n; i++) { x = a + i * h; if (i == 0 || i == n - 1) total_s = total_s + f(x); else if (i % 2 == 1) total_s = total_s + 4 * f(x); else total_s = total_s + 2 * f(x); }*/ total_s = parallel_compute(a, b, n, compute_kernel_simpson_big); /*total_s = parallel_simpson(a, b, n);*/ total_s = h / 3 * total_s; wtime_s = omp_get_wtime() - wtime_s; result->time += wtime_s; result->value[2] = total_s; printf("\n"); printf(" Estimate quadratic rule = %24.16f\n", total_q); printf(" Estimate trapezoidal rule = %24.16f\n", total_t); printf(" Estimate Simpson 1/3 rule = %24.16f\n", total_s); printf(" Time quadratic rule = %f\n", wtime_q); printf(" Time trapezoidal rule = %f\n", wtime_t); printf(" Time Simpson 1/3 rule = %f\n", wtime_s); printf("\n"); printf(" Normal end of execution.\n"); printf("\n"); return 0; } int main(int argc, char *argv[]) { //double sequential_result, parallel_result, sequential_time, parallel_time; Result_Vect seq_result; Result_Vect par_result; seq_result.val_size = 3; seq_result.value = (double*)malloc(3 * sizeof(double)); par_result.val_size = 3; par_result.value = (double*)malloc(3 * sizeof(double)); /*for (int i = 1; ; i <<= 1) { double *nekid; cudaMalloc((void **)&nekid, i * sizeof(double)); if (cudaSuccess != cudaGetLastError()) { printf("couldnt allocate %d doubles\n", i); break; } else { printf("allocated %d doubles\n", i); cudaFree(nekid); } }*/ sequential(argc, argv, 
&seq_result); parallel(argc, argv, &par_result); compare_and_print_vect(seq_result, par_result, "Numeric integration"); }
6351a8c21d43768243ba6bb324c95403f9e9e5ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { const Dtype alpha = 1.0; const Dtype beta = 0.0; cudnnConvolutionFwdAlgo_t algo; CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, // memoryLimitInBytes, &algo)); size_t workspaceSizeInBytes_temp = 0; CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], algo, &workspaceSizeInBytes_temp)); if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { workspaceSizeInBytes = workspaceSizeInBytes_temp; // free the existing workspace and allocate a new (larger) one if (this->workspace != NULL) { hipFree(this->workspace); } hipMalloc(&(this->workspace), workspaceSizeInBytes); CUDA_POST_KERNEL_CHECK; } // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], (void *)(&alpha), bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + weight_offset_ * g, conv_descs_[i], algo, workspace, workspaceSizeInBytes, (void *)(&beta), top_descs_[i], top_data + top_offset_ * g) ); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); Dtype alpha = 1.0; Dtype beta = 1.0; CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, (void *)(&alpha), bias_desc_, bias_data + bias_offset_ * g, (void *)(&beta), top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. 
if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype alpha = 1.0; Dtype beta = 1.0; CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], (void *)(&alpha), top_descs_[i], top_diff + top_offset_ * g, (void *)(&beta), bias_desc_, bias_diff + bias_offset_ * g) ); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { Dtype alpha = 1.0; Dtype beta = 1.0; const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], (void *)(&alpha), bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], (void *)(&beta), filter_desc_, weight_diff + weight_offset_ * g) ); } // Gradient w.r.t. bottom data. if (propagate_down[i]) { Dtype alpha = 1.0; Dtype beta = 0.0; Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], (void *)(&alpha), filter_desc_, weight + weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], (void *)(&beta), bottom_descs_[i], bottom_diff + bottom_offset_ * g) ); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
6351a8c21d43768243ba6bb324c95403f9e9e5ad.cu
#ifdef USE_CUDNN #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { const Dtype alpha = 1.0; const Dtype beta = 0.0; cudnnConvolutionFwdAlgo_t algo; CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, // memoryLimitInBytes, &algo)); size_t workspaceSizeInBytes_temp = 0; CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], algo, &workspaceSizeInBytes_temp)); if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { workspaceSizeInBytes = workspaceSizeInBytes_temp; // free the existing workspace and allocate a new (larger) one if (this->workspace != NULL) { cudaFree(this->workspace); } cudaMalloc(&(this->workspace), workspaceSizeInBytes); CUDA_POST_KERNEL_CHECK; } // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], (void *)(&alpha), bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + weight_offset_ * g, conv_descs_[i], algo, workspace, workspaceSizeInBytes, (void *)(&beta), top_descs_[i], top_data + top_offset_ * g) ); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); Dtype alpha = 1.0; Dtype beta = 1.0; CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, (void *)(&alpha), bias_desc_, bias_data + bias_offset_ * g, (void *)(&beta), top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype alpha = 1.0; Dtype beta = 1.0; CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], (void *)(&alpha), top_descs_[i], top_diff + top_offset_ * g, (void *)(&beta), bias_desc_, bias_diff + bias_offset_ * g) ); } // Gradient w.r.t. weights. 
if (this->param_propagate_down_[0]) { Dtype alpha = 1.0; Dtype beta = 1.0; const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], (void *)(&alpha), bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], (void *)(&beta), filter_desc_, weight_diff + weight_offset_ * g) ); } // Gradient w.r.t. bottom data. if (propagate_down[i]) { Dtype alpha = 1.0; Dtype beta = 0.0; Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], (void *)(&alpha), filter_desc_, weight + weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], (void *)(&beta), bottom_descs_[i], bottom_diff + bottom_offset_ * g) ); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
be34692fa380a113ec3df257196b8833cda2164d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include "common_hip.cuh" #include <kat/on_device/shuffle.cuh> #include <kat/containers/array.hpp> // TODO: Run some/all tests for half-precision floating-point values, e.g __half from: // #include <hip/hip_fp16.h> // TODO: Also test behavior with warps with some inactive/exited lanes #include <kat/detail/execution_space_specifiers.hpp> namespace kernels { template <typename T, std::size_t N> KAT_FHD kat::array<T, N>& operator++(::kat::array<T, N>& x) { for(auto& e : x) { e++; } return x; } template <typename T, std::size_t N> KAT_FHD kat::array<T, N> operator++(::kat::array<T, N>& x, int) { kat::array<T, N> copy; for(auto& e : x) { e++; } return copy; } // TODO: Add __restrict__ to these kernels ... but that triggers a bug, for some reason, with CUDA 9.2 template <typename T> __global__ void test_shuffle_up( const T* unshuffled, T* shuffled, unsigned delta) { assert(gridDim.y == 1 and blockDim.y == 1); auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x; T datum { kat::shuffle_up(unshuffled[global_thread_index], delta) }; shuffled[global_thread_index] = datum; } template <typename T> __global__ void test_shuffle_down( const T* unshuffled, T* shuffled, unsigned delta) { assert(gridDim.y == 1 and blockDim.y == 1); auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x; T datum { kat::shuffle_down(unshuffled[global_thread_index], delta) }; shuffled[global_thread_index] = datum; } template <typename T> __global__ void test_shuffle_xor( const T* unshuffled, T* shuffled, const int mask) { assert(gridDim.y == 1 and blockDim.y == 1); auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x; // thread_printf("__shfl_xor_sync(%X, %d, %X, %d)", kat::full_warp_mask, 123, mask, kat::warp_size); T datum { // unshuffled[global_thread_index] kat::shuffle_xor(unshuffled[global_thread_index], mask) // kat::builtins::warp::shuffle::xor_(unshuffled[global_thread_index], mask) // shfl_xor_sync(kat::full_warp_mask, unshuffled[global_thread_index], mask, kat::warp_size) // 1000 + unshuffled[global_thread_index] //123 }; shuffled[global_thread_index] = datum; } template <typename T, typename F> __global__ void test_arbitrary_shuffle( const T* unshuffled, T* shuffled, F get_source_lane_for) { assert(gridDim.y == 1 and blockDim.y == 1); auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x; auto lane_index = threadIdx.x % kat::warp_size; auto shuffle_source_lane = get_source_lane_for(lane_index); T datum { kat::shuffle_arbitrary(unshuffled[global_thread_index], shuffle_source_lane) }; shuffled[global_thread_index] = datum; } } // namespace kernels constexpr const auto num_full_warps { 2 }; // this is aribtrary; didn't just want to have 1. constexpr const auto block_size { num_full_warps * kat::warp_size }; TEST_SUITE("shuffle") { TEST_CASE_TEMPLATE("up", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE) { cuda::device_t device { cuda::device::current::get() }; // TODO: Test shuffles with non-full warps. 
	auto num_grid_blocks { 1 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	std::array<I, block_size> host_side_unshuffled;
	std::array<I, block_size> host_side_shuffled;
	std::iota(host_side_unshuffled.begin(), host_side_unshuffled.end(), 0);
	std::array<I, block_size> host_side_expected_shuffled;

	for(int delta = 0; delta < kat::warp_size; delta++) {
		for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos++) {
			// Note: I wonder if it's a good idea not to use a typedef for lane indices.
			unsigned lane_index = pos % kat::warp_size;
			auto shuffle_origin_pos = (lane_index >= delta) ? (pos - delta) : pos;
			host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
		}
		cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
		cuda::launch(
			::kernels::test_shuffle_up<I>,
			launch_config,
			device_side_unshuffled.get(), device_side_shuffled.get(), delta);
		cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
		constexpr const auto print_results { false };
		auto found_discrepancy { false };
		for(auto i { 0 }; i < block_size; i++) {
			CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
			if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
				found_discrepancy = true;
				MESSAGE("index of discrepancy was: " << i);
			}
		}
		if (print_results) {
			if (found_discrepancy) {
				std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
				std::cout << "Input shuffled up with delta = " << delta << ":\n" << host_side_shuffled << '\n';
				std::cout << "Expected shuffled-up output:\n" << host_side_expected_shuffled << '\n';
			}
			else {
				std::cout << "No discrepancies for type = " << util::type_name<I>() << ", delta = " << delta << ".\n";
			}
		}
	}
}

TEST_CASE_TEMPLATE("down", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	auto num_grid_blocks { 1 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	std::array<I, block_size> host_side_unshuffled;
	std::array<I, block_size> host_side_shuffled;
	std::iota(host_side_unshuffled.begin(), host_side_unshuffled.end(), 0);
	std::array<I, block_size> host_side_expected_shuffled;

	for(int delta = 0; delta < kat::warp_size; delta++) {
		for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos++) {
			unsigned lane_index = pos % kat::warp_size;
			auto shuffle_origin_pos = (lane_index < kat::warp_size - delta) ? (pos + delta) : pos;
			host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
		}
		cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
		cuda::launch(
			::kernels::test_shuffle_down<I>,
			launch_config,
			device_side_unshuffled.get(), device_side_shuffled.get(), delta);
		cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
		constexpr const auto print_results { false };
		auto found_discrepancy { false };
		for(auto i { 0 }; i < block_size; i++) {
			CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
			if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
				found_discrepancy = true;
				MESSAGE("index of discrepancy was: " << i);
			}
		}
		if (print_results) {
			if (found_discrepancy) {
				std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
				std::cout << "Input shuffled down with delta = " << delta << ":\n" << host_side_shuffled << '\n';
				std::cout << "Expected shuffled-down output:\n" << host_side_expected_shuffled << '\n';
			}
			else {
				std::cout << "No discrepancies for type = " << util::type_name<I>() << ", delta = " << delta << ".\n";
			}
		}
	}
}

TEST_CASE_TEMPLATE("xor", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	auto num_grid_blocks { 1 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	std::array<I, block_size> host_side_unshuffled;
	std::array<I, block_size> host_side_shuffled;
	std::iota(host_side_unshuffled.begin(), host_side_unshuffled.end(), 0);
	std::array<I, block_size> host_side_expected_shuffled;

	for(size_t mask_index { 0 }; mask_index < kat::warp_size; mask_index++) {
		// Note the mask can't have bits that aren't present in actual lane indices,
		// so the mask does not exceed warp_size - 1
		// std::uniform_int_distribution<kat::lane_mask_t> distribution(kat::empty_warp_mask, kat::full_warp_mask);
		// // util::random::seed(std::time(0)); // seed with the current time
		// auto mask = util::random::sample_from(distribution);
		int mask = mask_index; // yes, just like that
		// std::cout << "Using mask " << std::hex << (unsigned) mask << std::dec << std::endl;
		for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos++) {
			unsigned lane_index = pos % kat::warp_size;
			// (pos - lane_index) is warp-aligned, so XOR-ing it with a value below
			// warp_size is equivalent to adding that value.
			auto shuffle_origin_pos = (pos - lane_index) ^ (lane_index xor mask);
			host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
			// std::cout << "pos = " << std::setw(2) << pos << ", host_side_expected_shuffled[" << std::setw(2) << pos << "] = " << std::setw(2) << host_side_expected_shuffled[pos] << std::endl;
		}
		cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
		cuda::launch(
			::kernels::test_shuffle_xor<I>,
			launch_config,
			device_side_unshuffled.get(), device_side_shuffled.get(), mask);
		cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
		constexpr const auto print_results { false };
		auto found_discrepancy { false };
		for(auto i { 0 }; i < block_size; i++) {
			CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
			if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
				found_discrepancy = true;
				MESSAGE("index of discrepancy was: " << i);
			}
		}
		if (print_results) {
			if (found_discrepancy) {
				std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
				std::cout << "Input shuffled with xor mask = " << std::hex << mask << std::dec << ":\n" << host_side_shuffled << '\n';
				std::cout << "Expected xor-shuffled output:\n" << host_side_expected_shuffled << '\n';
			}
			else {
				std::cout << "No discrepancies for type = " << util::type_name<I>() << ", mask = " << std::hex << mask << std::dec << ".\n";
			}
		}
	}
}

} // TEST_SUITE("shuffle")
be34692fa380a113ec3df257196b8833cda2164d.cu
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "common.cuh"

#include <kat/on_device/shuffle.cuh>
#include <kat/containers/array.hpp>

// TODO: Run some/all tests for half-precision floating-point values, e.g. __half from:
// #include <cuda_fp16.h>

// TODO: Also test behavior with warps with some inactive/exited lanes

#include <kat/detail/execution_space_specifiers.hpp>

namespace kernels {

template <typename T, std::size_t N>
KAT_FHD kat::array<T, N>& operator++(::kat::array<T, N>& x)
{
	for(auto& e : x) { e++; }
	return x;
}

template <typename T, std::size_t N>
KAT_FHD kat::array<T, N> operator++(::kat::array<T, N>& x, int)
{
	kat::array<T, N> copy { x }; // copy first: postfix ++ must return the pre-increment value
	for(auto& e : x) { e++; }
	return copy;
}

// TODO: Add __restrict__ to these kernels ... but that triggers a bug, for some reason, with CUDA 9.2

template <typename T>
__global__ void test_shuffle_up(const T* unshuffled, T* shuffled, unsigned delta)
{
	assert(gridDim.y == 1 and blockDim.y == 1);
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	T datum { kat::shuffle_up(unshuffled[global_thread_index], delta) };
	shuffled[global_thread_index] = datum;
}

template <typename T>
__global__ void test_shuffle_down(const T* unshuffled, T* shuffled, unsigned delta)
{
	assert(gridDim.y == 1 and blockDim.y == 1);
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	T datum { kat::shuffle_down(unshuffled[global_thread_index], delta) };
	shuffled[global_thread_index] = datum;
}

template <typename T>
__global__ void test_shuffle_xor(const T* unshuffled, T* shuffled, const int mask)
{
	assert(gridDim.y == 1 and blockDim.y == 1);
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	// thread_printf("__shfl_xor_sync(%X, %d, %X, %d)", kat::full_warp_mask, 123, mask, kat::warp_size);
	T datum {
		// unshuffled[global_thread_index]
		kat::shuffle_xor(unshuffled[global_thread_index], mask)
		// kat::builtins::warp::shuffle::xor_(unshuffled[global_thread_index], mask)
		// shfl_xor_sync(kat::full_warp_mask, unshuffled[global_thread_index], mask, kat::warp_size)
		// 1000 + unshuffled[global_thread_index]
		//123
	};
	shuffled[global_thread_index] = datum;
}

template <typename T, typename F>
__global__ void test_arbitrary_shuffle(const T* unshuffled, T* shuffled, F get_source_lane_for)
{
	assert(gridDim.y == 1 and blockDim.y == 1);
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	auto lane_index = threadIdx.x % kat::warp_size;
	auto shuffle_source_lane = get_source_lane_for(lane_index);
	T datum { kat::shuffle_arbitrary(unshuffled[global_thread_index], shuffle_source_lane) };
	shuffled[global_thread_index] = datum;
}

} // namespace kernels

constexpr const auto num_full_warps { 2 }; // this is arbitrary; didn't just want to have 1.
constexpr const auto block_size { num_full_warps * kat::warp_size };

TEST_SUITE("shuffle") {

TEST_CASE_TEMPLATE("up", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	auto num_grid_blocks { 1 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	std::array<I, block_size> host_side_unshuffled;
	std::array<I, block_size> host_side_shuffled;
	std::iota(host_side_unshuffled.begin(), host_side_unshuffled.end(), 0);
	std::array<I, block_size> host_side_expected_shuffled;

	for(int delta = 0; delta < kat::warp_size; delta++) {
		for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos++) {
			// Note: I wonder if it's a good idea not to use a typedef for lane indices.
			unsigned lane_index = pos % kat::warp_size;
			auto shuffle_origin_pos = (lane_index >= delta) ? (pos - delta) : pos;
			host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
		}
		cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
		cuda::launch(
			::kernels::test_shuffle_up<I>,
			launch_config,
			device_side_unshuffled.get(), device_side_shuffled.get(), delta);
		cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
		constexpr const auto print_results { false };
		auto found_discrepancy { false };
		for(auto i { 0 }; i < block_size; i++) {
			CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
			if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
				found_discrepancy = true;
				MESSAGE("index of discrepancy was: " << i);
			}
		}
		if (print_results) {
			if (found_discrepancy) {
				std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
				std::cout << "Input shuffled up with delta = " << delta << ":\n" << host_side_shuffled << '\n';
				std::cout << "Expected shuffled-up output:\n" << host_side_expected_shuffled << '\n';
			}
			else {
				std::cout << "No discrepancies for type = " << util::type_name<I>() << ", delta = " << delta << ".\n";
			}
		}
	}
}

TEST_CASE_TEMPLATE("down", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	auto num_grid_blocks { 1 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	std::array<I, block_size> host_side_unshuffled;
	std::array<I, block_size> host_side_shuffled;
	std::iota(host_side_unshuffled.begin(), host_side_unshuffled.end(), 0);
	std::array<I, block_size> host_side_expected_shuffled;

	for(int delta = 0; delta < kat::warp_size; delta++) {
		for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos++) {
			unsigned lane_index = pos % kat::warp_size;
			auto shuffle_origin_pos = (lane_index < kat::warp_size - delta) ? (pos + delta) : pos;
			host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
		}
		cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
		cuda::launch(
			::kernels::test_shuffle_down<I>,
			launch_config,
			device_side_unshuffled.get(), device_side_shuffled.get(), delta);
		cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
		constexpr const auto print_results { false };
		auto found_discrepancy { false };
		for(auto i { 0 }; i < block_size; i++) {
			CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
			if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
				found_discrepancy = true;
				MESSAGE("index of discrepancy was: " << i);
			}
		}
		if (print_results) {
			if (found_discrepancy) {
				std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
				std::cout << "Input shuffled down with delta = " << delta << ":\n" << host_side_shuffled << '\n';
				std::cout << "Expected shuffled-down output:\n" << host_side_expected_shuffled << '\n';
			}
			else {
				std::cout << "No discrepancies for type = " << util::type_name<I>() << ", delta = " << delta << ".\n";
			}
		}
	}
}

TEST_CASE_TEMPLATE("xor", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	auto num_grid_blocks { 1 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
	std::array<I, block_size> host_side_unshuffled;
	std::array<I, block_size> host_side_shuffled;
	std::iota(host_side_unshuffled.begin(), host_side_unshuffled.end(), 0);
	std::array<I, block_size> host_side_expected_shuffled;

	for(size_t mask_index { 0 }; mask_index < kat::warp_size; mask_index++) {
		// Note the mask can't have bits that aren't present in actual lane indices,
		// so the mask does not exceed warp_size - 1
		// std::uniform_int_distribution<kat::lane_mask_t> distribution(kat::empty_warp_mask, kat::full_warp_mask);
		// // util::random::seed(std::time(0)); // seed with the current time
		// auto mask = util::random::sample_from(distribution);
		int mask = mask_index; // yes, just like that
		// std::cout << "Using mask " << std::hex << (unsigned) mask << std::dec << std::endl;
		for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos++) {
			unsigned lane_index = pos % kat::warp_size;
			// (pos - lane_index) is warp-aligned, so XOR-ing it with a value below
			// warp_size is equivalent to adding that value.
			auto shuffle_origin_pos = (pos - lane_index) ^ (lane_index xor mask);
			host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
			// std::cout << "pos = " << std::setw(2) << pos << ", host_side_expected_shuffled[" << std::setw(2) << pos << "] = " << std::setw(2) << host_side_expected_shuffled[pos] << std::endl;
		}
		cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
		cuda::launch(
			::kernels::test_shuffle_xor<I>,
			launch_config,
			device_side_unshuffled.get(), device_side_shuffled.get(), mask);
		cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
		constexpr const auto print_results { false };
		auto found_discrepancy { false };
		for(auto i { 0 }; i < block_size; i++) {
			CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
			if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
				found_discrepancy = true;
				MESSAGE("index of discrepancy was: " << i);
			}
		}
		if (print_results) {
			if (found_discrepancy) {
				std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
				std::cout << "Input shuffled with xor mask = " << std::hex << mask << std::dec << ":\n" << host_side_shuffled << '\n';
				std::cout << "Expected xor-shuffled output:\n" << host_side_expected_shuffled << '\n';
			}
			else {
				std::cout << "No discrepancies for type = " << util::type_name<I>() << ", mask = " << std::hex << mask << std::dec << ".\n";
			}
		}
	}
}

} // TEST_SUITE("shuffle")
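// Editor's note: a minimal, self-contained sketch (not part of the kat test files
// above) checking the host-side model the xor test relies on -- within a warp,
// XOR-ing the lane index with a sub-warp mask selects the source lane, and since
// the warp base (pos - lane) is a multiple of warp_size, XOR on the base equals
// addition. Uses raw __shfl_xor_sync; names like checkShflXor are illustrative.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void checkShflXor(const int* in, int* out, int mask)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	out[i] = __shfl_xor_sync(0xffffffffu, in[i], mask);
}

int main()
{
	const int n = 64; // two full warps, as in the tests above
	int h_in[n], h_out[n];
	for (int i = 0; i < n; i++) h_in[i] = i;
	int *d_in, *d_out;
	cudaMalloc(&d_in, n * sizeof(int));
	cudaMalloc(&d_out, n * sizeof(int));
	cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);
	for (int mask = 0; mask < 32; mask++) {
		checkShflXor<<<1, n>>>(d_in, d_out, mask);
		cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
		for (int pos = 0; pos < n; pos++) {
			int lane = pos % 32;
			// same as (pos - lane) ^ (lane ^ mask), because (pos - lane) is warp-aligned
			int expected = h_in[(pos - lane) + (lane ^ mask)];
			if (h_out[pos] != expected) printf("mismatch at %d (mask %d)\n", pos, mask);
		}
	}
	cudaFree(d_in);
	cudaFree(d_out);
	return 0;
}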
7a445a37589470488a63f539c6677f2613a51863.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "color_to_grey.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
	hipSetDevice(0);
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			uchar3 *input_image = NULL;
			hipMalloc(&input_image, XSIZE*YSIZE*sizeof(uchar3)); // allocation size is in bytes, so scale by the element size
			uchar3 *output_image = NULL;
			hipMalloc(&output_image, XSIZE*YSIZE*sizeof(uchar3));
			int width = XSIZE;
			int height = YSIZE;
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) { iXSIZE++; } // round extents up to a multiple of the block size
			while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			hipFree(0);
			hipLaunchKernelGGL(( color_to_grey), dim3(gridBlock),dim3(threadBlock), 0, 0, input_image,output_image,width,height);
			hipDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { // warm-up launches
				hipLaunchKernelGGL(( color_to_grey), dim3(gridBlock),dim3(threadBlock), 0, 0, input_image,output_image,width,height);
			}
			hipDeviceSynchronize(); // drain the warm-up launches before timing
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				hipLaunchKernelGGL(( color_to_grey), dim3(gridBlock),dim3(threadBlock), 0, 0, input_image,output_image,width,height);
			}
			hipDeviceSynchronize(); // launches are asynchronous; wait before stopping the clock
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
			hipFree(input_image); // release per-configuration buffers to avoid leaking across iterations
			hipFree(output_image);
		}
	}
}
7a445a37589470488a63f539c6677f2613a51863.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "color_to_grey.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
	cudaSetDevice(0);
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			uchar3 *input_image = NULL;
			cudaMalloc(&input_image, XSIZE*YSIZE*sizeof(uchar3)); // allocation size is in bytes, so scale by the element size
			uchar3 *output_image = NULL;
			cudaMalloc(&output_image, XSIZE*YSIZE*sizeof(uchar3));
			int width = XSIZE;
			int height = YSIZE;
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) { iXSIZE++; } // round extents up to a multiple of the block size
			while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);
			color_to_grey<<<gridBlock,threadBlock>>>(input_image,output_image,width,height);
			cudaDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { // warm-up launches
				color_to_grey<<<gridBlock,threadBlock>>>(input_image,output_image,width,height);
			}
			cudaDeviceSynchronize(); // drain the warm-up launches before timing
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				color_to_grey<<<gridBlock,threadBlock>>>(input_image,output_image,width,height);
			}
			cudaDeviceSynchronize(); // launches are asynchronous; wait before stopping the clock
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
			cudaFree(input_image); // release per-configuration buffers to avoid leaking across iterations
			cudaFree(output_image);
		}
	}
}
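// Editor's note: the harness above rounds the image extents up with while-loops
// before dividing by the block size. Ceiling division computes the same grid in
// one step; a hedged sketch (coveringGrid is an illustrative name, not part of
// the generated file):
#include <cuda_runtime.h>

static dim3 coveringGrid(int width, int height, dim3 block)
{
	// (x + b - 1) / b == ceil(x / b) for positive integers
	return dim3((width  + block.x - 1) / block.x,
	            (height + block.y - 1) / block.y);
}
// e.g. dim3 gridBlock = coveringGrid(XSIZE, YSIZE, dim3(BLOCKX, BLOCKY));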
a516acab78bd15cadc28dda02065b4df6c97f4f2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void vectorAddKernel(float* inputA, float* inputB, float* output, int length){
	// compute the element index for this thread
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// add a vector element, guarding against out-of-range threads
	if(idx < length)
		output[idx] = inputA[idx] + inputB[idx];
}
a516acab78bd15cadc28dda02065b4df6c97f4f2.cu
#include "includes.h" __global__ void vectorAddKernel(float* inputA, float* inputB, float* output, int length){ //compute element index int idx = blockIdx.x * blockDim.x + threadIdx.x; //add an vector element if(idx < length) output[idx] = inputA[idx] + inputB[idx]; }
daf71a3ca078d68b0fcccc99757c5d44beb5da18.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define BLOCKSIZE 256

__global__ void kern_set_val (float *gpu_ptr, float value, int n)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x; //TO DO: evaluate the value of i DONE
	if (i < n) // guard in case the grid is rounded up past n
		gpu_ptr[i] = value;
}

int main ()
{
	int i, failed = 0;
	int N = 1024;   // size of vector
	float *ptr;     // Host pointer
	float *gpu_ptr; // Device pointer

	/* Allocate vector in Host*/
	ptr = (float *)malloc(sizeof(float)*N);
	/* Allocate vector in Device*/
	hipMalloc (&gpu_ptr, sizeof(float)*N);

	//TO DO : write kernel invocation here DONE (pass the element count N, not the value again)
	hipLaunchKernelGGL(( kern_set_val), dim3(N/BLOCKSIZE),dim3(BLOCKSIZE), 0, 0, gpu_ptr, 11.0f, N);
	hipDeviceSynchronize ();

	//TO DO : copy data to host DONE
	hipMemcpy(ptr, gpu_ptr, sizeof(float)*N, hipMemcpyDeviceToHost);
	hipFree (gpu_ptr);

	/* Now check that it did what we want */
	for (i = 0; i < 10; i++) //first ten values are written
		printf ("%f\t", ptr[i]);
	printf ("\n");
	for (i = N-10; i < N; i++) //last ten values are written
		printf ("%f\t", ptr[i]);
	printf ("\n");
	for (i = 0; i < N; i++) { //All values are compared
		if (fabs(ptr[i]-11.0) > 1e-8) {
			failed = 1;
		}
	}
	if (failed) {
		printf ("FAILED !!\n");
	} else {
		printf ("PASSED !!\n");
	}
	free (ptr);
}
daf71a3ca078d68b0fcccc99757c5d44beb5da18.cu
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define BLOCKSIZE 256

__global__ void kern_set_val (float *gpu_ptr, float value, int n)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x; //TO DO: evaluate the value of i DONE
	if (i < n) // guard in case the grid is rounded up past n
		gpu_ptr[i] = value;
}

int main ()
{
	int i, failed = 0;
	int N = 1024;   // size of vector
	float *ptr;     // Host pointer
	float *gpu_ptr; // Device pointer

	/* Allocate vector in Host*/
	ptr = (float *)malloc(sizeof(float)*N);
	/* Allocate vector in Device*/
	cudaMalloc (&gpu_ptr, sizeof(float)*N);

	//TO DO : write kernel invocation here DONE (pass the element count N, not the value again)
	kern_set_val<<<N/BLOCKSIZE,BLOCKSIZE>>>(gpu_ptr, 11.0f, N);
	cudaDeviceSynchronize ();

	//TO DO : copy data to host DONE
	cudaMemcpy(ptr, gpu_ptr, sizeof(float)*N, cudaMemcpyDeviceToHost);
	cudaFree (gpu_ptr);

	/* Now check that it did what we want */
	for (i = 0; i < 10; i++) //first ten values are written
		printf ("%f\t", ptr[i]);
	printf ("\n");
	for (i = N-10; i < N; i++) //last ten values are written
		printf ("%f\t", ptr[i]);
	printf ("\n");
	for (i = 0; i < N; i++) { //All values are compared
		if (fabs(ptr[i]-11.0) > 1e-8) {
			failed = 1;
		}
	}
	if (failed) {
		printf ("FAILED !!\n");
	} else {
		printf ("PASSED !!\n");
	}
	free (ptr);
}
21871a57713dcddcd5b9df159d3877f304885588.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N 10000000
#define numThread 2
#define numBlock 200

__global__ void add( int *a, int *b, int *c ) {
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	while (tid < N) {
		c[tid] = a[tid] + b[tid];
		tid += blockDim.x * gridDim.x; // grid-stride: advance by the total thread count, not just one block's worth
	}
}

int main( void ) {
	int *a, *b, *c;             // The arrays on the host CPU machine
	int *dev_a, *dev_b, *dev_c; // The arrays for the GPU device

	a = (int*)malloc( N * sizeof(int) );
	b = (int*)malloc( N * sizeof(int) );
	c = (int*)malloc( N * sizeof(int) );
	for (int i=0; i<N; i++) {
		a[i] = i;
		b[i] = i;
	}

	hipMalloc( (void**)&dev_a, N * sizeof(int) );
	hipMalloc( (void**)&dev_b, N * sizeof(int) );
	hipMalloc( (void**)&dev_c, N * sizeof(int) );

	hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice );
	hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice );

	hipLaunchKernelGGL(( add), dim3(numBlock),dim3(numThread), 0, 0, dev_a, dev_b, dev_c );

	hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost );

	bool success = true;
	int total = 0;
	printf("Checking %d values in the array.\n", N);
	for (int i=0; i<N; i++) {
		if ((a[i] + b[i]) != c[i]) {
			printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
			success = false;
		}
		total += 1;
	}
	if (success) printf( "We did it, %d values correct!\n", total );

	free( a );
	free( b );
	free( c );
	hipFree( dev_a );
	hipFree( dev_b );
	hipFree( dev_c );
	return 0;
}
21871a57713dcddcd5b9df159d3877f304885588.cu
#include <stdio.h>

#define N 10000000
#define numThread 2
#define numBlock 200

__global__ void add( int *a, int *b, int *c ) {
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	while (tid < N) {
		c[tid] = a[tid] + b[tid];
		tid += blockDim.x * gridDim.x; // grid-stride: advance by the total thread count, not just one block's worth
	}
}

int main( void ) {
	int *a, *b, *c;             // The arrays on the host CPU machine
	int *dev_a, *dev_b, *dev_c; // The arrays for the GPU device

	a = (int*)malloc( N * sizeof(int) );
	b = (int*)malloc( N * sizeof(int) );
	c = (int*)malloc( N * sizeof(int) );
	for (int i=0; i<N; i++) {
		a[i] = i;
		b[i] = i;
	}

	cudaMalloc( (void**)&dev_a, N * sizeof(int) );
	cudaMalloc( (void**)&dev_b, N * sizeof(int) );
	cudaMalloc( (void**)&dev_c, N * sizeof(int) );

	cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
	cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );

	add<<<numBlock,numThread>>>( dev_a, dev_b, dev_c );

	cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );

	bool success = true;
	int total = 0;
	printf("Checking %d values in the array.\n", N);
	for (int i=0; i<N; i++) {
		if ((a[i] + b[i]) != c[i]) {
			printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
			success = false;
		}
		total += 1;
	}
	if (success) printf( "We did it, %d values correct!\n", total );

	free( a );
	free( b );
	free( c );
	cudaFree( dev_a );
	cudaFree( dev_b );
	cudaFree( dev_c );
	return 0;
}
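// Editor's note: the loop in add() above is the grid-stride idiom; for reference,
// here is the same kernel written in the canonical for-loop form (a sketch,
// equivalent to the fixed while-loop above):
__global__ void addGridStride(const int* a, const int* b, int* c, int n)
{
	// each thread starts at its global index and strides by the total thread
	// count, so any grid size covers all n elements exactly once
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
		c[i] = a[i] + b[i];
	}
}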
731dcc358a280e261eff8b3c37a74d6b4d751aaa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "exception.h"
#include "helper_cuda.h"
#include "helper_functions.h"
#include "helper_image.h"
#include "helper_string.h"
#include "helper_timer.h"

#ifndef NUMTRIALS
#define NUMTRIALS (1024 * 1024)
#endif

#ifndef BLOCKSIZE
#define BLOCKSIZE 512
#endif

#ifndef NUMBLOCKS
#define NUMBLOCKS (NUMTRIALS/BLOCKSIZE)
#endif

// ranges for the random numbers:
const float GMIN  = 20.0; // ground distance in meters
const float GMAX  = 30.0; // ground distance in meters
const float HMIN  = 10.0; // cliff height in meters
const float HMAX  = 40.0; // cliff height in meters
const float DMIN  = 10.0; // distance to castle in meters
const float DMAX  = 20.0; // distance to castle in meters
const float VMIN  = 30.0; // initial cannonball velocity in meters / sec
const float VMAX  = 50.0; // initial cannonball velocity in meters / sec
const float THMIN = 70.0; // cannonball launch angle in degrees
const float THMAX = 80.0; // cannonball launch angle in degrees

const float GRAVITY = -9.8; // acceleration due to gravity in meters / sec^2

const float TOL = 5.0; // tolerance in cannonball hitting the castle in meters
                       // castle is destroyed if cannonball lands between d-TOL and d+TOL

// function prototypes:
float Ranf( float, float );
int Ranf( int, int );
void TimeOfDaySeed( );
void CudaCheckError( );

// degrees-to-radians -- callable from the device:
__device__ float Radians( float d )
{
	return (M_PI/180.f) * d;
}

// the kernel:
__global__ void MonteCarlo( float *dvs, float *dths, float *dgs, float *dhs, float *dds, int *dhits )
{
	unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;

	// load this trial's randomized inputs:
	float v   = dvs[gid];
	float thr = Radians( dths[gid] );
	float vx  = v * cos(thr);
	float vy  = v * sin(thr);
	float g   = dgs[gid];
	float h   = dhs[gid];
	float d   = dds[gid];
	int numHits = 0;

	// see if the ball doesn't even reach the cliff:
	float t = -vy / ( 0.5*GRAVITY );
	float x = vx * t;
	if( x > g)
	{
		// see if the ball hits the vertical cliff face:
		t = g/vx;
		float t_squared = pow(t, 2);
		float y = vy*t + 0.5 * GRAVITY * t_squared;
		if( y > h )
		{
			// the ball hits the upper deck:
			// the time solution for this is a quadratic equation of the form:
			// at^2 + bt + c = 0.
			// where 'a' multiplies time^2
			//       'b' multiplies time
			//       'c' is a constant
			float a = 0.5 * GRAVITY;
			float b = vy;
			float c = -h;
			float disc = b*b - 4.f*a*c; // quadratic formula discriminant

			// successfully hits the ground above the cliff:
			// get the intersection:
			disc = sqrtf( disc );
			float t1 = (-b + disc ) / ( 2.f*a ); // time to intersect high ground
			float t2 = (-b - disc ) / ( 2.f*a ); // time to intersect high ground

			// only care about the second intersection
			float tmax = t1;
			if( t2 > t1 )
				tmax = t2;

			// how far does the ball land horizontally from the edge of the cliff?
float upperDist = vx * tmax - g; // see if the ball hits the castle: if( fabs( upperDist - d ) <= TOL ) { numHits = 1; } } // if ball clears the cliff face } // if ball gets as far as the cliff face dhits[gid] = numHits; } // for( # of monte carlo trials ) // these two #defines are just to label things // other than that, they do nothing: #define IN #define OUT int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); //int dev = findCudaDevice(argc, (const char **)argv); // better to define these here so that the rand() calls don't get into the thread timing: float *hvs = new float [NUMTRIALS]; float *hths = new float [NUMTRIALS]; float *hgs = new float [NUMTRIALS]; float *hhs = new float [NUMTRIALS]; float *hds = new float [NUMTRIALS]; int *hhits = new int [NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hvs[n] = Ranf( VMIN, VMAX ); hths[n] = Ranf( THMIN, THMAX ); hgs[n] = Ranf( GMIN, GMAX ); hhs[n] = Ranf( HMIN, HMAX ); hds[n] = Ranf( DMIN, DMAX ); } // allocate device memory: float *dvs, *dths, *dgs, *dhs, *dds; int *dhits; hipMalloc( &dvs, NUMTRIALS*sizeof(float) ); hipMalloc( &dths, NUMTRIALS*sizeof(float) ); hipMalloc( &dgs, NUMTRIALS*sizeof(float) ); hipMalloc( &dhs, NUMTRIALS*sizeof(float) ); hipMalloc( &dds, NUMTRIALS*sizeof(float) ); hipMalloc( &dhits, NUMTRIALS*sizeof(int) ); CudaCheckError( ); // copy host memory to the device: hipMemcpy( dvs, hvs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( dths, hths, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( dgs, hgs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( dhs, hhs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( dds, hds, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); CudaCheckError( ); // setup the execution parameters: dim3 grid( NUMBLOCKS, 1, 1 ); dim3 threads( BLOCKSIZE, 1, 1 ); // allocate cuda events that we'll use for timing: hipEvent_t start, stop; hipEventCreate( &start ); hipEventCreate( &stop ); CudaCheckError( ); // let the gpu go quiet: hipDeviceSynchronize( ); // record the start event: hipEventRecord( start, NULL ); CudaCheckError( ); // execute the kernel: hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads) , 0, 0, IN dvs, IN dths, IN dgs, IN dhs, IN dds, OUT dhits ); // record the stop event: hipEventRecord( stop, NULL ); CudaCheckError( ); // wait for the stop event to complete: hipDeviceSynchronize( ); hipEventSynchronize( stop ); CudaCheckError( ); float msecTotal = 0.0f; hipEventElapsedTime( &msecTotal, start, stop ); CudaCheckError( ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double multsPerSecond = (double)NUMTRIALS / secondsTotal; double megaMultsPerSecond = multsPerSecond / 1000000.; // copy result from the device to the host: hipMemcpy( hhits, dhits, NUMTRIALS*sizeof(int), hipMemcpyDeviceToHost ); CudaCheckError( ); // add up the hhits[ ] array: : int total_hits = 0; for( int n = 0; n < NUMTRIALS; n++ ) { total_hits += hhits[n]; } // compute and print the probability: float probability = (float)total_hits/(float)(NUMTRIALS); fprintf( stderr, "%12d\t%4d\t%10.2lf\t%6.2f%%\n", NUMTRIALS, BLOCKSIZE, megaMultsPerSecond, 100.*probability); // clean up host memory: delete [ ] hvs; delete [ ] hths; delete [ ] hgs; delete [ ] hhs; delete [ ] hds; delete [ ] hhits; // clean up device memory: hipFree( dvs ); hipFree( dths ); hipFree( dgs ); hipFree( dhs ); hipFree( dds ); hipFree( dhits ); CudaCheckError( ); return 0; } void CudaCheckError( ) { hipError_t e = 
hipGetLastError( ); if( e != hipSuccess ) { fprintf( stderr, "CUDA failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e) ); } } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. return low + t * ( high - low ); } int Ranf( int ilow, int ihigh ) { float low = (float)ilow; float high = ceil( (float)ihigh ); return (int) Ranf(low,high); } void TimeOfDaySeed( ) { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time( &timer ); double seconds = difftime( timer, mktime(&y2k) ); unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds srand( seed ); }
731dcc358a280e261eff8b3c37a74d6b4d751aaa.cu
#include "exception.h" #include "helper_cuda.h" #include "helper_functions.h" #include "helper_image.h" #include "helper_string.h" #include "helper_timer.h" #ifndef NUMTRIALS #define NUMTRIALS (1024 * 1024) #endif #ifndef BLOCKSIZE #define BLOCKSIZE 512 #endif #ifndef NUMBLOCKS #define NUMBLOCKS (NUMTRIALS/BLOCKSIZE) #endif // ranges for the random numbers: const float GMIN = 20.0; // ground distance in meters const float GMAX = 30.0; // ground distance in meters const float HMIN = 10.0; // cliff height in meters const float HMAX = 40.0; // cliff height in meters const float DMIN = 10.0; // distance to castle in meters const float DMAX = 20.0; // distance to castle in meters const float VMIN = 30.0; // intial cnnonball velocity in meters / sec const float VMAX = 50.0; // intial cnnonball velocity in meters / sec const float THMIN = 70.0; // cannonball launch angle in degrees const float THMAX = 80.0; // cannonball launch angle in degrees const float GRAVITY = -9.8; // acceleraion due to gravity in meters / sec^2 const float TOL = 5.0; // tolerance in cannonball hitting the castle in meters // castle is destroyed if cannonball lands between d-TOL and d+TOL // function prototypes: float Ranf( float, float ); int Ranf( int, int ); void TimeOfDaySeed( ); void CudaCheckError( ); // degrees-to-radians -- callable from the device: __device__ float Radians( float d ) { return (M_PI/180.f) * d; } // the kernel: __global__ void MonteCarlo( float *dvs, float *dths, float *dgs, float *dhs, float *dds, int *dhits ) { unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x; // randomize everything: float v = dvs[gid]; float thr = Radians( dths[gid] ); float vx = v * cos(thr); float vy = v * sin(thr); float g = dgs[gid]; float h = dhs[gid]; float d = dds[gid]; int numHits = 0; // see if the ball doesn't even reach the cliff: float t = -vy / ( 0.5*GRAVITY ); float x = vx * t; if( x > g) { // see if the ball hits the vertical cliff face: t = g/vx; float t_squared = pow(t, 2); float y = vy*t + 0.5 * GRAVITY * t_squared; if( y > h ) { // the ball hits the upper deck: // the time solution for this is a quadratic equation of the form: // at^2 + bt + c = 0. // where 'a' multiplies time^2 // 'b' multiplies time // 'c' is a constant float a = 0.5 * GRAVITY; float b = vy; float c = -h; float disc = b*b - 4.f*a*c; // quadratic formula discriminant // successfully hits the ground above the cliff: // get the intersection: disc = sqrtf( disc ); float t1 = (-b + disc ) / ( 2.f*a ); // time to intersect high ground float t2 = (-b - disc ) / ( 2.f*a ); // time to intersect high ground // only care about the second intersection float tmax = t1; if( t2 > t1 ) tmax = t2; // how far does the ball land horizontlly from the edge of the cliff? 
float upperDist = vx * tmax - g; // see if the ball hits the castle: if( fabs( upperDist - d ) <= TOL ) { numHits = 1; } } // if ball clears the cliff face } // if ball gets as far as the cliff face dhits[gid] = numHits; } // for( # of monte carlo trials ) // these two #defines are just to label things // other than that, they do nothing: #define IN #define OUT int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); //int dev = findCudaDevice(argc, (const char **)argv); // better to define these here so that the rand() calls don't get into the thread timing: float *hvs = new float [NUMTRIALS]; float *hths = new float [NUMTRIALS]; float *hgs = new float [NUMTRIALS]; float *hhs = new float [NUMTRIALS]; float *hds = new float [NUMTRIALS]; int *hhits = new int [NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hvs[n] = Ranf( VMIN, VMAX ); hths[n] = Ranf( THMIN, THMAX ); hgs[n] = Ranf( GMIN, GMAX ); hhs[n] = Ranf( HMIN, HMAX ); hds[n] = Ranf( DMIN, DMAX ); } // allocate device memory: float *dvs, *dths, *dgs, *dhs, *dds; int *dhits; cudaMalloc( &dvs, NUMTRIALS*sizeof(float) ); cudaMalloc( &dths, NUMTRIALS*sizeof(float) ); cudaMalloc( &dgs, NUMTRIALS*sizeof(float) ); cudaMalloc( &dhs, NUMTRIALS*sizeof(float) ); cudaMalloc( &dds, NUMTRIALS*sizeof(float) ); cudaMalloc( &dhits, NUMTRIALS*sizeof(int) ); CudaCheckError( ); // copy host memory to the device: cudaMemcpy( dvs, hvs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( dths, hths, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( dgs, hgs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( dhs, hhs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( dds, hds, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); CudaCheckError( ); // setup the execution parameters: dim3 grid( NUMBLOCKS, 1, 1 ); dim3 threads( BLOCKSIZE, 1, 1 ); // allocate cuda events that we'll use for timing: cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); CudaCheckError( ); // let the gpu go quiet: cudaDeviceSynchronize( ); // record the start event: cudaEventRecord( start, NULL ); CudaCheckError( ); // execute the kernel: MonteCarlo<<< grid, threads >>>( IN dvs, IN dths, IN dgs, IN dhs, IN dds, OUT dhits ); // record the stop event: cudaEventRecord( stop, NULL ); CudaCheckError( ); // wait for the stop event to complete: cudaDeviceSynchronize( ); cudaEventSynchronize( stop ); CudaCheckError( ); float msecTotal = 0.0f; cudaEventElapsedTime( &msecTotal, start, stop ); CudaCheckError( ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double multsPerSecond = (double)NUMTRIALS / secondsTotal; double megaMultsPerSecond = multsPerSecond / 1000000.; // copy result from the device to the host: cudaMemcpy( hhits, dhits, NUMTRIALS*sizeof(int), cudaMemcpyDeviceToHost ); CudaCheckError( ); // add up the hhits[ ] array: : int total_hits = 0; for( int n = 0; n < NUMTRIALS; n++ ) { total_hits += hhits[n]; } // compute and print the probability: float probability = (float)total_hits/(float)(NUMTRIALS); fprintf( stderr, "%12d\t%4d\t%10.2lf\t%6.2f%%\n", NUMTRIALS, BLOCKSIZE, megaMultsPerSecond, 100.*probability); // clean up host memory: delete [ ] hvs; delete [ ] hths; delete [ ] hgs; delete [ ] hhs; delete [ ] hds; delete [ ] hhits; // clean up device memory: cudaFree( dvs ); cudaFree( dths ); cudaFree( dgs ); cudaFree( dhs ); cudaFree( dds ); cudaFree( dhits ); CudaCheckError( ); return 0; } void CudaCheckError( ) { cudaError_t e = 
cudaGetLastError( ); if( e != cudaSuccess ) { fprintf( stderr, "CUDA failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e) ); } } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. return low + t * ( high - low ); } int Ranf( int ilow, int ihigh ) { float low = (float)ilow; float high = ceil( (float)ihigh ); return (int) Ranf(low,high); } void TimeOfDaySeed( ) { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time( &timer ); double seconds = difftime( timer, mktime(&y2k) ); unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds srand( seed ); }
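// Editor's note: one caveat in CudaCheckError() above: __FILE__ and __LINE__
// expand inside the helper, so every report points at the helper itself rather
// than the failing call. A common alternative is a macro that captures the call
// site (a sketch, not part of the original file):
#include <cstdio>
#include <cuda_runtime.h>

#define CUDA_CHECK_ERROR()                                              \
	do {                                                                \
		cudaError_t e_ = cudaGetLastError();                            \
		if (e_ != cudaSuccess)                                          \
			fprintf(stderr, "CUDA failure %s:%d: '%s'\n",               \
			        __FILE__, __LINE__, cudaGetErrorString(e_));        \
	} while (0)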
ff4599ccc1ffad6fc9573e05f59e82f25dda07ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "convolutional_layer.h" #include "deconvolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "hip/hip_runtime.h" } extern "C" void forward_deconvolutional_layer_gpu(layer l, network_state state) { int i; int out_h = l.out_h; int out_w = l.out_w; int size = out_h*out_w; int m = l.size*l.size*l.n; int n = l.h*l.w; int k = l.c; fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); for(i = 0; i < l.batch; ++i){ float *a = l.weights_gpu; float *b = state.input + i*l.c*l.h*l.w; float *c = state.workspace; gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n); col2im_ongpu(c, l.n, out_h, out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.n*size); } if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, state); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_ongpu(l.output_gpu, l.batch*l.n*size, l.activation); } extern "C" void backward_deconvolutional_layer_gpu(layer l, network_state state) { int out_h = l.out_h; int out_w = l.out_w; int size = out_h*out_w; int i; gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, state); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } //if(state.delta) memset(state.delta, 0, l.batch*l.h*l.w*l.c*sizeof(float)); for(i = 0; i < l.batch; ++i){ int m = l.c; int n = l.size*l.size*l.n; int k = l.h*l.w; float *a = state.input + i*m*n; float *b = state.workspace; float *c = l.weight_updates_gpu; im2col_ongpu(l.delta_gpu + i*l.n*size, l.n, out_h, out_w, l.size, l.stride, l.pad, b); gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(state.delta){ int m = l.c; int n = l.h*l.w; int k = l.size*l.size*l.n; float *a = l.weights_gpu; float *b = state.workspace; float *c = state.delta + i*n*m; gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } } extern "C" void pull_deconvolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } extern "C" void push_deconvolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_deconvolutional_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay) { int size = l.size*l.size*l.c*l.n; axpy_ongpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu){ axpy_ongpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1); } axpy_ongpu(size, -decay*batch, 
l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_ongpu(size, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_ongpu(size, momentum, l.weight_updates_gpu, 1); }
ff4599ccc1ffad6fc9573e05f59e82f25dda07ab.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "convolutional_layer.h" #include "deconvolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "cuda.h" } extern "C" void forward_deconvolutional_layer_gpu(layer l, network_state state) { int i; int out_h = l.out_h; int out_w = l.out_w; int size = out_h*out_w; int m = l.size*l.size*l.n; int n = l.h*l.w; int k = l.c; fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); for(i = 0; i < l.batch; ++i){ float *a = l.weights_gpu; float *b = state.input + i*l.c*l.h*l.w; float *c = state.workspace; gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n); col2im_ongpu(c, l.n, out_h, out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.n*size); } if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, state); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_ongpu(l.output_gpu, l.batch*l.n*size, l.activation); } extern "C" void backward_deconvolutional_layer_gpu(layer l, network_state state) { int out_h = l.out_h; int out_w = l.out_w; int size = out_h*out_w; int i; gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, state); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } //if(state.delta) memset(state.delta, 0, l.batch*l.h*l.w*l.c*sizeof(float)); for(i = 0; i < l.batch; ++i){ int m = l.c; int n = l.size*l.size*l.n; int k = l.h*l.w; float *a = state.input + i*m*n; float *b = state.workspace; float *c = l.weight_updates_gpu; im2col_ongpu(l.delta_gpu + i*l.n*size, l.n, out_h, out_w, l.size, l.stride, l.pad, b); gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(state.delta){ int m = l.c; int n = l.h*l.w; int k = l.size*l.size*l.n; float *a = l.weights_gpu; float *b = state.workspace; float *c = state.delta + i*n*m; gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } } extern "C" void pull_deconvolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } extern "C" void push_deconvolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_deconvolutional_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay) { int size = l.size*l.size*l.c*l.n; axpy_ongpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu){ axpy_ongpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1); } axpy_ongpu(size, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_ongpu(size, learning_rate/batch, 
l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_ongpu(size, momentum, l.weight_updates_gpu, 1); }
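// Editor's note: update_deconvolutional_layer_gpu() above implements SGD with
// momentum and L2 weight decay via BLAS-style calls. In scalar form, each weight
// w with accumulated gradient dw is updated as below (a host-side sketch of what
// the axpy/scal sequence computes; sgd_update is an illustrative name):
static void sgd_update(float *w, float *dw, int n,
                       float learning_rate, float batch, float momentum, float decay)
{
	for (int i = 0; i < n; i++) {
		dw[i] -= decay * batch * w[i];            // axpy_ongpu(size, -decay*batch, w, 1, dw, 1)
		w[i]  += (learning_rate / batch) * dw[i]; // axpy_ongpu(size, lr/batch, dw, 1, w, 1)
		dw[i] *= momentum;                        // scal_ongpu(size, momentum, dw, 1)
	}
}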
631c8ee0b59964b03dde7483b5f6d7404183db31.hip
// !!! This is a file automatically generated by hipify!!!
#include "host_projection.h" // contains all required packages and functions

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
// Macro for input and output
#define IN_IMG prhs[0]
#define GEO_PARA prhs[1]
#define OUT_PROJ plhs[0]

	int nx, ny, nz, na, nb, numImg, numBytesImg, numSingleProj, numBytesSingleProj;
	float da, db, ai, bi, SO, SD, angle;

	// resolutions of volumes
	if (mxGetField(GEO_PARA, 0, "nx") != NULL)
		nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx"));
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid volume resolution nx.\n");
	if (mxGetField(GEO_PARA, 0, "ny") != NULL)
		ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny"));
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid volume resolution ny.\n");
	if (mxGetField(GEO_PARA, 0, "nz") != NULL)
		nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz"));
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid volume resolution nz.\n");
	numImg = nx * ny * nz; // size of image
	numBytesImg = numImg * sizeof(float); // number of bytes in image

	// detector plane resolutions
	if (mxGetField(GEO_PARA, 0, "na") != NULL)
		na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "na"));
	else if (mxGetField(GEO_PARA, 0, "nv") != NULL)
		na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nv"));
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid number of detectors in plane, which is denoted as na or nv.\n");
	if (mxGetField(GEO_PARA, 0, "nb") != NULL)
		nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nb"));
	else if (mxGetField(GEO_PARA, 0, "nu") != NULL)
		nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nu"));
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid number of detectors across plane, which is denoted as nb or nu.\n");
	numSingleProj = na * nb;
	numBytesSingleProj = numSingleProj * sizeof(float);

	// detector resolution
	if (mxGetField(GEO_PARA, 0, "da") != NULL)
		da = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "da"));
	else{
		da = 1.0f;
		mexPrintf("Automatically set detector cell size da to 1. \n");
		mexPrintf("If you don't want that default value, please set para.da manually.\n");
	}
	if (mxGetField(GEO_PARA, 0, "db") != NULL)
		db = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "db"));
	else{
		db = 1.0f;
		mexPrintf("Automatically set detector cell size db to 1. \n");
		mexPrintf("If you don't want that default value, please set para.db manually.\n");
	}

	// detector plane offset from centered calibrations
	if (mxGetField(GEO_PARA, 0, "ai") != NULL){
		ai = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "ai"));
		ai -= (float)na / 2 - 0.5f;
	}
	else{
		mexPrintf("Automatically set detector offset ai to 0. \n");
		mexPrintf("If you don't want that default value, please set para.ai manually.\n");
		ai = - (float)na / 2 + 0.5f;
	}
	if (mxGetField(GEO_PARA, 0, "bi") != NULL){
		bi = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "bi"));
		if (bi > -1)
			bi -= (float)nb / 2 - 0.5f;
	}
	else{
		mexPrintf("Automatically set detector offset bi to 0. \n");
		mexPrintf("If you don't want that default value, please set para.bi manually.\n");
		bi = - (float)nb / 2 + 0.5f;
	}

	if (mxGetField(GEO_PARA, 0, "SO") != NULL)
		SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SO"));
	else if (mxGetField(GEO_PARA, 0, "SI") != NULL)
		SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SI"));
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid distance between source and isocenter, which is denoted as para.SO or para.SI.\n");
	if (mxGetField(GEO_PARA, 0, "SD") != NULL)
		SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SD"));
	else if (mxGetField(GEO_PARA, 0, "DI") != NULL)
		SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "DI")) + SO;
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid distance between source and detector plane, which is denoted as para.SD or para.SI + para.DI.\n");

	if (mxGetField(GEO_PARA, 0, "angle") != NULL)
		angle = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "angle"));
	else
		mexErrMsgIdAndTxt("MATLAB:badInput","Can't find a valid projection angle, which is denoted as para.angle.\n");

	float *d_img, *d_proj;
	hipMalloc((void**)&d_img, nx * ny * nz * sizeof(float));
	float *h_img;
	h_img = (float*)mxGetData(IN_IMG);
	hipMemcpy(d_img, h_img, nx * ny * nz * sizeof(float), hipMemcpyHostToDevice);
	hipMalloc((void**)&d_proj, na * nb * sizeof(float));

	const dim3 gridSize_singleProj((na + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (nb + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, 1);
	const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, 1);
	hipLaunchKernelGGL(( kernel_projection), dim3(gridSize_singleProj), dim3(blockSize), 0, 0, d_proj, d_img, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
	hipDeviceSynchronize();

	OUT_PROJ = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL);
	const mwSize outDim[2] = {(mwSize)na, (mwSize)nb};
	mxSetDimensions(OUT_PROJ, outDim, 2);
	mxSetData(OUT_PROJ, mxMalloc(na * nb * sizeof(float)));
	float *h_outproj = (float*)mxGetData(OUT_PROJ);
	hipMemcpy(h_outproj, d_proj, numBytesSingleProj, hipMemcpyDeviceToHost);

	hipFree(d_proj);
	hipFree(d_img);
	hipDeviceReset();
	return;
}
631c8ee0b59964b03dde7483b5f6d7404183db31.cu
#include "host_projection.h" // consists all required package and functions void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { // Macro for input and output #define IN_IMG prhs[0] #define GEO_PARA prhs[1] #define OUT_PROJ plhs[0] int nx, ny, nz, na, nb, numImg, numBytesImg, numSingleProj, numBytesSingleProj; float da, db, ai, bi, SO, SD, angle; // resolutions of volumes if (mxGetField(GEO_PARA, 0, "nx") != NULL) nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx")); else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nx.\n"); if (mxGetField(GEO_PARA, 0, "ny") != NULL) ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny")); else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution ny.\n"); if (mxGetField(GEO_PARA, 0, "nz") != NULL) nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz")); else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nz.\n"); numImg = nx * ny * nz; // size of image numBytesImg = numImg * sizeof(float); // number of bytes in image // detector plane resolutions if (mxGetField(GEO_PARA, 0, "na") != NULL) na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "na")); else if (mxGetField(GEO_PARA, 0, "nv") != NULL) na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nv")); else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector in plane, which is denoted as na or nu.\n"); if (mxGetField(GEO_PARA, 0, "nb") != NULL) nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nb")); else if (mxGetField(GEO_PARA, 0, "nu") != NULL) nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nu")); else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector across plane, which is denoted as nb or nv.\n"); numSingleProj = na * nb; numBytesSingleProj = numSingleProj * sizeof(float); // detector resolution if (mxGetField(GEO_PARA, 0, "da") != NULL) da = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "da")); else{ da = 1.0f; mexPrintf("Automatically set detector cell size da to 1. \n"); mexPrintf("If don't want that default value, please set para.da manually.\n"); } if (mxGetField(GEO_PARA, 0, "db") != NULL) db = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "db")); else{ db = 1.0f; mexPrintf("Automatically set detectof cell size db to 1. \n"); mexPrintf("If don't want that default value, please set para.db manually.\n"); } // detector plane offset from centered calibrations if (mxGetField(GEO_PARA, 0, "ai") != NULL){ ai = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "ai")); ai -= (float)na / 2 - 0.5f; } else{ mexPrintf("Automatically set detector offset ai to 0. \n"); mexPrintf("If don't want that default value, please set para.ai manually.\n"); ai = - (float)na / 2 + 0.5f; } if (mxGetField(GEO_PARA, 0, "bi") != NULL){ bi = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "bi")); if (bi > -1) bi -= (float)nb / 2 - 0.5f; } else{ mexPrintf("Automatically set detector offset bi to 0. 
\n"); mexPrintf("If don't want that default value, please set para.bi manually.\n"); bi = - (float)nb / 2 + 0.5f; } if (mxGetField(GEO_PARA, 0, "SO") != NULL) SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SO")); else if (mxGetField(GEO_PARA, 0, "SI") != NULL) SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SI")); else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and isocenter, which is denoted with para.SO or para.DI.\n"); if (mxGetField(GEO_PARA, 0, "SD") != NULL) SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SD")); else if (mxGetField(GEO_PARA, 0, "DI") != NULL) SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "DI")) + SO; else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and detector plane, which is denoted with para.SD or para.SI + para.DI.\n"); if (mxGetField(GEO_PARA, 0, "angle") != NULL) angle = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "angle")); else mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid projection angle, which is denoted with para.angle.\n"); float *d_img, *d_proj; cudaMalloc((void**)&d_img, nx * ny * nz * sizeof(float)); float *h_img; h_img = (float*)mxGetData(IN_IMG); cudaMemcpy(d_img, h_img, nx * ny * nz * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_proj, na * nb * sizeof(float)); const dim3 gridSize_singleProj((na + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (nb + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, 1); const dim3 blockSize(BLOCKSIZE_X,BLOCKSIZE_Y, 1); kernel_projection<<<gridSize_singleProj, blockSize>>>(d_proj, d_img, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz); cudaDeviceSynchronize(); OUT_PROJ = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL); const mwSize outDim[2] = {(mwSize)na, (mwSize)nb}; mxSetDimensions(OUT_PROJ, outDim, 2); mxSetData(OUT_PROJ, mxMalloc(na * nb * sizeof(float))); float *h_outproj = (float*)mxGetData(OUT_PROJ); cudaMemcpy(h_outproj, d_proj, numBytesSingleProj, cudaMemcpyDeviceToHost); cudaFree(d_proj); cudaFree(d_img); cudaDeviceReset(); return; }
de5a9c8aeb5bb3fd9538c537165e4e8d5eb9b5c7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #define DATA_SIZE 1048576 #define THREAD_NUM 256 #define BLOCK_NUM 32 bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } void GenerateNumbers(int *number, int size) { for(int i = 0; i < size; i++) { number[i] = rand() % 10; } } __global__ static void sumOfSquares(int *num, int* result, clock_t* time) { const int tid = threadIdx.x; const int bid = blockIdx.x; int sum = 0; int i; if(tid == 0) time[bid] = clock(); for(i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) { sum += num[i] * num[i]; } result[bid * THREAD_NUM + tid] = sum; if(tid == 0) time[bid + BLOCK_NUM] = clock(); } int main() { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); int data[DATA_SIZE]; GenerateNumbers(data, DATA_SIZE); int* gpudata, *result; clock_t* time; clock_t start_g, stop_g; start_g = clock(); hipMalloc((void**) &gpudata, sizeof(int) * DATA_SIZE); hipMalloc((void**) &result, sizeof(int) * THREAD_NUM * BLOCK_NUM); hipMalloc((void**) &time, sizeof(clock_t) * BLOCK_NUM * 2); hipMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice); hipLaunchKernelGGL(( sumOfSquares), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, gpudata, result, time); int sum[THREAD_NUM * BLOCK_NUM]; clock_t time_used[BLOCK_NUM * 2]; hipMemcpy(&sum, result, sizeof(int) * THREAD_NUM * BLOCK_NUM, hipMemcpyDeviceToHost); hipMemcpy(&time_used, time, sizeof(clock_t) * BLOCK_NUM * 2, hipMemcpyDeviceToHost); hipFree(gpudata); hipFree(result); hipFree(time); stop_g = (clock() - start_g); int final_num = 0; for(int i = 0; i < THREAD_NUM * BLOCK_NUM; i++){ final_num += sum[i]; } clock_t min_start, max_end; min_start = time_used[0]; max_end = time_used[BLOCK_NUM]; for(int i = 1; i < BLOCK_NUM; i++){ if(min_start > time_used[i]) min_start = time_used[i]; if(max_end < time_used[i+BLOCK_NUM]) max_end = time_used[i + BLOCK_NUM]; } printf("sum (GPU): %d time: %f timeg: %f \n", final_num, (double)(max_end - min_start) / CLOCKS_PER_SEC, (double) stop_g / CLOCKS_PER_SEC); clock_t start, stop; start = clock(); final_num = 0; for(int i = 0; i < DATA_SIZE; i++) { final_num += data[i] * data[i]; } stop = clock() - start; printf("sum (CPU): %d time: %f \n", final_num, (double)stop / CLOCKS_PER_SEC); return 0; }
de5a9c8aeb5bb3fd9538c537165e4e8d5eb9b5c7.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <time.h> #define DATA_SIZE 1048576 #define THREAD_NUM 256 #define BLOCK_NUM 32 bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } void GenerateNumbers(int *number, int size) { for(int i = 0; i < size; i++) { number[i] = rand() % 10; } } __global__ static void sumOfSquares(int *num, int* result, clock_t* time) { const int tid = threadIdx.x; const int bid = blockIdx.x; int sum = 0; int i; if(tid == 0) time[bid] = clock(); for(i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) { sum += num[i] * num[i]; } result[bid * THREAD_NUM + tid] = sum; if(tid == 0) time[bid + BLOCK_NUM] = clock(); } int main() { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); int data[DATA_SIZE]; GenerateNumbers(data, DATA_SIZE); int* gpudata, *result; clock_t* time; clock_t start_g, stop_g; start_g = clock(); cudaMalloc((void**) &gpudata, sizeof(int) * DATA_SIZE); cudaMalloc((void**) &result, sizeof(int) * THREAD_NUM * BLOCK_NUM); cudaMalloc((void**) &time, sizeof(clock_t) * BLOCK_NUM * 2); cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice); sumOfSquares<<<BLOCK_NUM, THREAD_NUM, 0>>>(gpudata, result, time); int sum[THREAD_NUM * BLOCK_NUM]; clock_t time_used[BLOCK_NUM * 2]; cudaMemcpy(&sum, result, sizeof(int) * THREAD_NUM * BLOCK_NUM, cudaMemcpyDeviceToHost); cudaMemcpy(&time_used, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost); cudaFree(gpudata); cudaFree(result); cudaFree(time); stop_g = (clock() - start_g); int final_num = 0; for(int i = 0; i < THREAD_NUM * BLOCK_NUM; i++){ final_num += sum[i]; } clock_t min_start, max_end; min_start = time_used[0]; max_end = time_used[BLOCK_NUM]; for(int i = 1; i < BLOCK_NUM; i++){ if(min_start > time_used[i]) min_start = time_used[i]; if(max_end < time_used[i+BLOCK_NUM]) max_end = time_used[i + BLOCK_NUM]; } printf("sum (GPU): %d time: %f timeg: %f \n", final_num, (double)(max_end - min_start) / CLOCKS_PER_SEC, (double) stop_g / CLOCKS_PER_SEC); clock_t start, stop; start = clock(); final_num = 0; for(int i = 0; i < DATA_SIZE; i++) { final_num += data[i] * data[i]; } stop = clock() - start; printf("sum (CPU): %d time: %f \n", final_num, (double)stop / CLOCKS_PER_SEC); return 0; }
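// Editor's note: clock() inside the kernel above counts GPU shader cycles, not
// host clock_t ticks, so dividing (max_end - min_start) by CLOCKS_PER_SEC (as the
// printf does) mislabels the units. A sketch of converting device cycles to
// seconds using the device clock rate (cudaDeviceProp::clockRate is in kHz;
// cyclesToSeconds is an illustrative name):
#include <cuda_runtime.h>

static double cyclesToSeconds(clock_t cycles, int device)
{
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, device);
	return (double)cycles / ((double)prop.clockRate * 1000.0); // kHz -> Hz
}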
6d4a261717669dcdb726f9349086560b32bd8298.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014, Stephen C. Sewell // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //////////////////////////////////////////////////////////////////////////////// #include <hiprand/hiprand.h> #include <fstream> #include <iostream> #include <thrust/sort.h> #include "cpu_only.h" #include "dev_mem.h" #include "device_stats.h" #include "precisiontimer.h" #include "sort_thread.h" #define NX1 512 #define NY1 10000 struct Particle_t { float2 pos; float3 vel; }; __host__ __device__ bool operator<(const Particle_t& lhs, const Particle_t& rhs) { unsigned int lhsX = static_cast<unsigned int>(lhs.pos.x); unsigned int lhsY = static_cast<unsigned int>(lhs.pos.y); unsigned int rhsX = static_cast<unsigned int>(rhs.pos.x); unsigned int rhsY = static_cast<unsigned int>(rhs.pos.y); bool ret = false; if(lhsY < rhsY) { ret = true; } else if(lhsY == rhsY) { if(lhsX < rhsX) { ret = true; } } return ret; } __global__ void initParticles(float2* pos, const float *randArray, unsigned int numParticles) { unsigned int threadX = blockDim.x * blockIdx.x + threadIdx.x; if(threadX < numParticles) { pos[threadX].x = NX1 * randArray[threadX]; pos[threadX].y = NY1 * randArray[numParticles+threadX]; } } __global__ void initParticles(Particle_t* particle, const float *randArray, unsigned int numParticles) { unsigned int threadX = blockDim.x * blockIdx.x + threadIdx.x; if(threadX < numParticles) { particle[threadX].pos.x = NX1 * randArray[threadX]; particle[threadX].pos.y = NY1 * randArray[numParticles+threadX]; } } __global__ void binParticles(const float2* pos, unsigned int* bins, unsigned int numParticles) { unsigned int threadX = blockDim.x * blockIdx.x + threadIdx.x; if(threadX < numParticles) { bins[threadX] = NX1 * pos[threadX].y + pos[threadX].x; } } void restorePositions(DevMem<float2> &pos, const DevMem<float> randArray, unsigned int numParticles) { pos.resize(numParticles); const unsigned int threadsPerBlock = 512; const unsigned int numBlocks = (numParticles + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( initParticles), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, pos.getPtr(), 
randArray.getPtr(), numParticles); checkCuda(hipGetLastError()); checkCuda(hipDeviceSynchronize()); } void restoreBins(DevMem<unsigned int> &bins, const DevMem<float2> &pos, unsigned int numParticles) { bins.resize(numParticles); const unsigned int threadsPerBlock = 512; const unsigned int numBlocks = (numParticles + threadsPerBlock - 1) / threadsPerBlock; checkCuda(hipGetLastError()); hipLaunchKernelGGL(( binParticles), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, pos.getPtr(), bins.getPtr(), numParticles); checkCuda(hipDeviceSynchronize()); } void timeSorts(const unsigned int numParticles, double& gpuIntBin, double& gpuCustomOp, double& timeSortClass, double& gpuFullSortInt, double& timeDevToHost, double &timeHostToDev, double& timeTbbSort) { PrecisionTimer t; const int neededRands = numParticles * 2; DevMem<float> randArray(neededRands); PrecisionTimer timer; hiprandGenerator_t randGenerator; hiprandCreateGenerator (&randGenerator, HIPRAND_RNG_PSEUDO_MTGP32); hiprandSetPseudoRandomGeneratorSeed(randGenerator, 1); hiprandGenerateUniform(randGenerator, randArray.getPtr(), neededRands); hiprandDestroyGenerator(randGenerator); ///////////////////////////////////////////////////////////////////////////// // Memcpy To Host Timing { DevMem<float2> pos(numParticles); DevMem<float3> fakeVelocities(numParticles); restorePositions(pos, randArray, numParticles); checkCuda(hipDeviceSynchronize()); HostMem<SortThread::Particle> cpuParticles(numParticles); DevStream readStream; t.start(); checkCuda(hipMemcpy2DAsync(&cpuParticles[0].pos, sizeof(SortThread::Particle), pos.getPtr(), sizeof(float2), sizeof(float2), numParticles, hipMemcpyDeviceToHost, *readStream)); checkCuda(hipMemcpy2DAsync(&cpuParticles[0].vel, sizeof(SortThread::Particle), fakeVelocities.getPtr(), sizeof(float3), sizeof(float3), numParticles, hipMemcpyDeviceToHost, *readStream)); readStream.synchronize(); t.stop(); timeDevToHost = t.intervalInNanoS() / 1000000; ///////////////////////////////////////////////////////////////////////////// // TBB Sort Timing unsigned int numOutOfOrder = 0; for(std::size_t i = 1; i < cpuParticles.size(); i++) { if(cpuParticles[i-1] < cpuParticles[i]) { numOutOfOrder++; break; } } assert(numOutOfOrder > 0); checkCuda(hipDeviceSynchronize()); t.start(); cpuSort(cpuParticles); t.stop(); checkCuda(hipDeviceSynchronize()); timeTbbSort = t.intervalInNanoS() / 1000000; assert(timeTbbSort == t.intervalInMilliS()); for(std::size_t i = 1; i < cpuParticles.size(); i++) { assert(cpuParticles[i-1] < cpuParticles[i]); } ///////////////////////////////////////////////////////////////////////////// // Memcpy To Device Timing DevStream writeStream; t.start(); checkCuda(hipMemcpy2DAsync(pos.getPtr(), sizeof(float2), &cpuParticles[0].pos, sizeof(SortThread::Particle), sizeof(float2), numParticles, hipMemcpyHostToDevice, *writeStream)); checkCuda(hipMemcpy2DAsync(fakeVelocities.getPtr(), sizeof(float3), &cpuParticles[0].vel, sizeof(SortThread::Particle), sizeof(float3), numParticles, hipMemcpyHostToDevice, *writeStream)); writeStream.synchronize(); t.stop(); timeHostToDev = t.intervalInNanoS() / 1000000; } ///////////////////////////////////////////////////////////////////////////// // Bin Sort Timing { DevMem<float2> pos(numParticles); DevMem<unsigned int> bins(numParticles); restorePositions(pos, randArray, numParticles); restoreBins(bins, pos, numParticles); t.start(); thrust::sort_by_key(bins.getThrustPtr(), bins.getThrustPtr()+numParticles, pos.getThrustPtr()); checkCuda(hipDeviceSynchronize()); t.stop(); gpuIntBin = 
t.intervalInNanoS() / 1000000; } ///////////////////////////////////////////////////////////////////////////// // Custom operator< timing { DevMem<Particle_t> particles(numParticles); const unsigned int threadsPerBlock = 512; const unsigned int numBlocks = (numParticles + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( initParticles), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, particles.getPtr(), randArray.getPtr(), numParticles); checkCuda(hipDeviceSynchronize()); t.start(); thrust::sort(particles.getThrustPtr(), particles.getThrustPtr()+numParticles); checkCuda(hipDeviceSynchronize()); t.stop(); gpuCustomOp = t.intervalInNanoS() / 1000000; } ///////////////////////////////////////////////////////////////////////////// // Sort Thread Class Timing { SortThread sortThread(NY1 + 1000); sortThread.setNumSortThreads(1); sortThread.disableParticleElimination(); sortThread.run(); DevMem<float2> pos(numParticles); restorePositions(pos, randArray, numParticles); DevMem<float3> fakeVelocities(numParticles); t.start(); sortThread.sortAsync(pos, fakeVelocities, numParticles); sortThread.waitForSort(pos, fakeVelocities); t.stop(); timeSortClass = t.intervalInNanoS() / 1000000; sortThread.join(); } ///////////////////////////////////////////////////////////////////////////// // Double bin sort timing // I need another copy of this; one to sort pos and one to sort vel { DevMem<float2> pos(numParticles); restorePositions(pos, randArray, numParticles); DevMem<float3> fakeVelocities(numParticles); DevMem<unsigned int> bins(numParticles); restoreBins(bins, pos, numParticles); DevMem<unsigned int> bins2 = bins; t.start(); thrust::sort_by_key(bins.getThrustPtr(), bins.getThrustPtr()+numParticles, pos.getThrustPtr()); thrust::sort_by_key(bins2.getThrustPtr(), bins2.getThrustPtr()+numParticles, fakeVelocities.getThrustPtr()); checkCuda(hipDeviceSynchronize()); t.stop(); gpuFullSortInt = t.intervalInNanoS() / 1000000; } } int main() { DeviceStats &device = DeviceStats::getRef(); const int maxParticles = 5000000; double timeBin; double timeFull; double cpuSort; double timeFullInt; double memcpyToHost; double memcpyToDevice; double timeTbbSort; std::ofstream sortTimes("sortTimes.txt"); sortTimes << "numParticles,binSortTime(ms),fullSortTime(ms),fullSortWithIntTime(ms)" << ",memcpyToHost,timTbbSort,memcpyToDevice" << std::endl; for(int i = 100000; i <= maxParticles; i+=100000) { timeSorts(i, timeBin, timeFull, cpuSort, timeFullInt, memcpyToHost, memcpyToDevice, timeTbbSort); std::cout << i << " particles; binSort: " << timeBin << "ms fullSort: " << timeFull << "ms cpuSort: " << cpuSort << " ms gpuFullSortIntKeys: " << timeFullInt << " ms" << std::endl; sortTimes << i << "," << timeBin << "," << timeFull << "," << cpuSort << "," << timeFullInt << "," << memcpyToHost << "," << timeTbbSort << "," << memcpyToDevice << std::endl; } return 0; }
6d4a261717669dcdb726f9349086560b32bd8298.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014, Stephen C. Sewell // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //////////////////////////////////////////////////////////////////////////////// #include <curand.h> #include <fstream> #include <iostream> #include <thrust/sort.h> #include "cpu_only.h" #include "dev_mem.h" #include "device_stats.h" #include "precisiontimer.h" #include "sort_thread.h" #define NX1 512 #define NY1 10000 struct Particle_t { float2 pos; float3 vel; }; __host__ __device__ bool operator<(const Particle_t& lhs, const Particle_t& rhs) { unsigned int lhsX = static_cast<unsigned int>(lhs.pos.x); unsigned int lhsY = static_cast<unsigned int>(lhs.pos.y); unsigned int rhsX = static_cast<unsigned int>(rhs.pos.x); unsigned int rhsY = static_cast<unsigned int>(rhs.pos.y); bool ret = false; if(lhsY < rhsY) { ret = true; } else if(lhsY == rhsY) { if(lhsX < rhsX) { ret = true; } } return ret; } __global__ void initParticles(float2* pos, const float *randArray, unsigned int numParticles) { unsigned int threadX = blockDim.x * blockIdx.x + threadIdx.x; if(threadX < numParticles) { pos[threadX].x = NX1 * randArray[threadX]; pos[threadX].y = NY1 * randArray[numParticles+threadX]; } } __global__ void initParticles(Particle_t* particle, const float *randArray, unsigned int numParticles) { unsigned int threadX = blockDim.x * blockIdx.x + threadIdx.x; if(threadX < numParticles) { particle[threadX].pos.x = NX1 * randArray[threadX]; particle[threadX].pos.y = NY1 * randArray[numParticles+threadX]; } } __global__ void binParticles(const float2* pos, unsigned int* bins, unsigned int numParticles) { unsigned int threadX = blockDim.x * blockIdx.x + threadIdx.x; if(threadX < numParticles) { bins[threadX] = NX1 * pos[threadX].y + pos[threadX].x; } } void restorePositions(DevMem<float2> &pos, const DevMem<float> randArray, unsigned int numParticles) { pos.resize(numParticles); const unsigned int threadsPerBlock = 512; const unsigned int numBlocks = (numParticles + threadsPerBlock - 1) / threadsPerBlock; initParticles<<<numBlocks, threadsPerBlock>>>(pos.getPtr(), randArray.getPtr(), numParticles); checkCuda(cudaGetLastError()); checkCuda(cudaDeviceSynchronize()); } void 
restoreBins(DevMem<unsigned int> &bins, const DevMem<float2> &pos, unsigned int numParticles) { bins.resize(numParticles); const unsigned int threadsPerBlock = 512; const unsigned int numBlocks = (numParticles + threadsPerBlock - 1) / threadsPerBlock; checkCuda(cudaGetLastError()); binParticles<<<numBlocks, threadsPerBlock>>>(pos.getPtr(), bins.getPtr(), numParticles); checkCuda(cudaDeviceSynchronize()); } void timeSorts(const unsigned int numParticles, double& gpuIntBin, double& gpuCustomOp, double& timeSortClass, double& gpuFullSortInt, double& timeDevToHost, double &timeHostToDev, double& timeTbbSort) { PrecisionTimer t; const int neededRands = numParticles * 2; DevMem<float> randArray(neededRands); PrecisionTimer timer; curandGenerator_t randGenerator; curandCreateGenerator (&randGenerator, CURAND_RNG_PSEUDO_MTGP32); curandSetPseudoRandomGeneratorSeed(randGenerator, 1); curandGenerateUniform(randGenerator, randArray.getPtr(), neededRands); curandDestroyGenerator(randGenerator); ///////////////////////////////////////////////////////////////////////////// // Memcpy To Host Timing { DevMem<float2> pos(numParticles); DevMem<float3> fakeVelocities(numParticles); restorePositions(pos, randArray, numParticles); checkCuda(cudaDeviceSynchronize()); HostMem<SortThread::Particle> cpuParticles(numParticles); DevStream readStream; t.start(); checkCuda(cudaMemcpy2DAsync(&cpuParticles[0].pos, sizeof(SortThread::Particle), pos.getPtr(), sizeof(float2), sizeof(float2), numParticles, cudaMemcpyDeviceToHost, *readStream)); checkCuda(cudaMemcpy2DAsync(&cpuParticles[0].vel, sizeof(SortThread::Particle), fakeVelocities.getPtr(), sizeof(float3), sizeof(float3), numParticles, cudaMemcpyDeviceToHost, *readStream)); readStream.synchronize(); t.stop(); timeDevToHost = t.intervalInNanoS() / 1000000; ///////////////////////////////////////////////////////////////////////////// // TBB Sort Timing unsigned int numOutOfOrder = 0; for(std::size_t i = 1; i < cpuParticles.size(); i++) { if(cpuParticles[i-1] < cpuParticles[i]) { numOutOfOrder++; break; } } assert(numOutOfOrder > 0); checkCuda(cudaDeviceSynchronize()); t.start(); cpuSort(cpuParticles); t.stop(); checkCuda(cudaDeviceSynchronize()); timeTbbSort = t.intervalInNanoS() / 1000000; assert(timeTbbSort == t.intervalInMilliS()); for(std::size_t i = 1; i < cpuParticles.size(); i++) { assert(cpuParticles[i-1] < cpuParticles[i]); } ///////////////////////////////////////////////////////////////////////////// // Memcpy To Device Timing DevStream writeStream; t.start(); checkCuda(cudaMemcpy2DAsync(pos.getPtr(), sizeof(float2), &cpuParticles[0].pos, sizeof(SortThread::Particle), sizeof(float2), numParticles, cudaMemcpyHostToDevice, *writeStream)); checkCuda(cudaMemcpy2DAsync(fakeVelocities.getPtr(), sizeof(float3), &cpuParticles[0].vel, sizeof(SortThread::Particle), sizeof(float3), numParticles, cudaMemcpyHostToDevice, *writeStream)); writeStream.synchronize(); t.stop(); timeHostToDev = t.intervalInNanoS() / 1000000; } ///////////////////////////////////////////////////////////////////////////// // Bin Sort Timing { DevMem<float2> pos(numParticles); DevMem<unsigned int> bins(numParticles); restorePositions(pos, randArray, numParticles); restoreBins(bins, pos, numParticles); t.start(); thrust::sort_by_key(bins.getThrustPtr(), bins.getThrustPtr()+numParticles, pos.getThrustPtr()); checkCuda(cudaDeviceSynchronize()); t.stop(); gpuIntBin = t.intervalInNanoS() / 1000000; } ///////////////////////////////////////////////////////////////////////////// // Custom operator< 
timing { DevMem<Particle_t> particles(numParticles); const unsigned int threadsPerBlock = 512; const unsigned int numBlocks = (numParticles + threadsPerBlock - 1) / threadsPerBlock; initParticles<<<numBlocks, threadsPerBlock>>>(particles.getPtr(), randArray.getPtr(), numParticles); checkCuda(cudaDeviceSynchronize()); t.start(); thrust::sort(particles.getThrustPtr(), particles.getThrustPtr()+numParticles); checkCuda(cudaDeviceSynchronize()); t.stop(); gpuCustomOp = t.intervalInNanoS() / 1000000; } ///////////////////////////////////////////////////////////////////////////// // Sort Thread Class Timing { SortThread sortThread(NY1 + 1000); sortThread.setNumSortThreads(1); sortThread.disableParticleElimination(); sortThread.run(); DevMem<float2> pos(numParticles); restorePositions(pos, randArray, numParticles); DevMem<float3> fakeVelocities(numParticles); t.start(); sortThread.sortAsync(pos, fakeVelocities, numParticles); sortThread.waitForSort(pos, fakeVelocities); t.stop(); timeSortClass = t.intervalInNanoS() / 1000000; sortThread.join(); } ///////////////////////////////////////////////////////////////////////////// // Double bin sort timing // I need another copy of this; one to sort pos and one to sort vel { DevMem<float2> pos(numParticles); restorePositions(pos, randArray, numParticles); DevMem<float3> fakeVelocities(numParticles); DevMem<unsigned int> bins(numParticles); restoreBins(bins, pos, numParticles); DevMem<unsigned int> bins2 = bins; t.start(); thrust::sort_by_key(bins.getThrustPtr(), bins.getThrustPtr()+numParticles, pos.getThrustPtr()); thrust::sort_by_key(bins2.getThrustPtr(), bins2.getThrustPtr()+numParticles, fakeVelocities.getThrustPtr()); checkCuda(cudaDeviceSynchronize()); t.stop(); gpuFullSortInt = t.intervalInNanoS() / 1000000; } } int main() { DeviceStats &device = DeviceStats::getRef(); const int maxParticles = 5000000; double timeBin; double timeFull; double cpuSort; double timeFullInt; double memcpyToHost; double memcpyToDevice; double timeTbbSort; std::ofstream sortTimes("sortTimes.txt"); sortTimes << "numParticles,binSortTime(ms),fullSortTime(ms),fullSortWithIntTime(ms)" << ",memcpyToHost,timTbbSort,memcpyToDevice" << std::endl; for(int i = 100000; i <= maxParticles; i+=100000) { timeSorts(i, timeBin, timeFull, cpuSort, timeFullInt, memcpyToHost, memcpyToDevice, timeTbbSort); std::cout << i << " particles; binSort: " << timeBin << "ms fullSort: " << timeFull << "ms cpuSort: " << cpuSort << " ms gpuFullSortIntKeys: " << timeFullInt << " ms" << std::endl; sortTimes << i << "," << timeBin << "," << timeFull << "," << cpuSort << "," << timeFullInt << "," << memcpyToHost << "," << timeTbbSort << "," << memcpyToDevice << std::endl; } return 0; }
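The "Double bin sort timing" section in the pair above copies the key array (bins2 = bins) so thrust::sort_by_key can run once for pos and once for fakeVelocities. A zip iterator permutes both value arrays in a single keyed sort. A minimal sketch built on thrust::device_vector rather than the file's DevMem wrapper:

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

// sort pos and vel by bin in one pass instead of two keyed sorts
void sortByBin(thrust::device_vector<unsigned int> &bins,
               thrust::device_vector<float2>       &pos,
               thrust::device_vector<float3>       &vel)
{
    thrust::sort_by_key(
        bins.begin(), bins.end(),
        thrust::make_zip_iterator(thrust::make_tuple(pos.begin(), vel.begin())));
}

One zipped sort moves the float2 and float3 payloads together; whether that beats two plain sorts with duplicated keys is exactly the kind of trade-off a benchmark like this can measure.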
5877152a3001de7cb6b4f2a3211d9a2e7aecfb31.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ge_erfc_inv.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int fd = 1; const REAL *a = NULL; hipMalloc(&a, sizeof(REAL)*XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; hipMalloc(&b, sizeof(REAL)*XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ge_erfc_inv), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ge_erfc_inv), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } hipDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ge_erfc_inv), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } hipDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5877152a3001de7cb6b4f2a3211d9a2e7aecfb31.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ge_erfc_inv.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int fd = 1; const REAL *a = NULL; cudaMalloc(&a, sizeof(REAL)*XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; cudaMalloc(&b, sizeof(REAL)*XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ge_erfc_inv<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ge_erfc_inv<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } cudaDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ge_erfc_inv<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } cudaDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
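Kernel launches are asynchronous, which is why the harness above synchronizes the device on both sides of the timed loop; without that, steady_clock would measure only enqueue overhead, and the buffers are sized with sizeof(REAL) because cudaMalloc/hipMalloc count bytes, not elements. CUDA events are the sharper tool for the same measurement, since they are timestamped on the stream itself. A sketch of an event-timed version of the 1000-launch loop, reusing the variable names from the file above:

cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);               // timestamped on the default stream
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    ge_erfc_inv<<<gridBlock, threadBlock>>>(sd, fd, a, offset_a, ld_a,
                                            b, offset_b, ld_b);
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);           // block until the last kernel finishes
float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, ev_start, ev_stop);  // GPU wall time in ms
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);

cudaEventElapsedTime reports milliseconds with roughly half a microsecond of resolution, so no CLOCKS_PER_SEC arithmetic or device-wide synchronization is needed inside the measured region.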
9b4fc2fe5140a527f152a2da31caf6e3b8ab2058.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void convert2DVectorToAngleMagnitude_kernel( uchar4 *d_angle_image, uchar4 *d_magnitude_image, float *d_vector_X, float *d_vector_Y, int width, int height, float lower_ang, float upper_ang, float lower_mag, float upper_mag) { const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y; uchar4 temp_angle, temp_magnitude; if (x < width && y < height) { float vector_X = d_vector_X[__mul24(y, width) + x]; float vector_Y = d_vector_Y[__mul24(y, width) + x]; // compute angle and magnitude float angle = atan2f(vector_Y, vector_X); float magnitude = vector_X * vector_X + vector_Y * vector_Y; magnitude = sqrtf(magnitude); // first draw unmatched pixels in white if (!isfinite(magnitude)) { temp_angle.x = 255; temp_angle.y = 255; temp_angle.z = 255; temp_angle.w = 255; temp_magnitude.x = 255; temp_magnitude.y = 255; temp_magnitude.z = 255; temp_magnitude.w = 255; } else { // rescale angle and magnitude from [lower,upper] to [0,1] and convert to // RGBA jet colorspace angle -= lower_ang; angle /= (upper_ang - lower_ang); float r = 1.0f; float g = 1.0f; float b = 1.0f; if (angle < 0.25f) { r = 0; g = 4.0f * angle; } else if (angle < 0.5f) { r = 0; b = 1.0 + 4.0f * (0.25f - angle); } else if (angle < 0.75f) { r = 4.0f * (angle - 0.5f); b = 0; } else { g = 1.0f + 4.0f * (0.75f - angle); b = 0; } temp_angle.x = 255.0 * r; temp_angle.y = 255.0 * g; temp_angle.z = 255.0 * b; temp_angle.w = 255; magnitude -= lower_mag; magnitude /= (upper_mag - lower_mag); r = 1.0f; g = 1.0f; b = 1.0f; if (magnitude < 0.25f) { r = 0; g = 4.0f * magnitude; } else if (magnitude < 0.5f) { r = 0; b = 1.0 + 4.0f * (0.25f - magnitude); } else if (magnitude < 0.75f) { r = 4.0f * (magnitude - 0.5f); b = 0; } else { g = 1.0f + 4.0f * (0.75f - magnitude); b = 0; } temp_magnitude.x = 255.0 * r; temp_magnitude.y = 255.0 * g; temp_magnitude.z = 255.0 * b; temp_magnitude.w = 255; } d_angle_image[__mul24(y, width) + x] = temp_angle; d_magnitude_image[__mul24(y, width) + x] = temp_magnitude; } }
9b4fc2fe5140a527f152a2da31caf6e3b8ab2058.cu
#include "includes.h" __global__ void convert2DVectorToAngleMagnitude_kernel( uchar4 *d_angle_image, uchar4 *d_magnitude_image, float *d_vector_X, float *d_vector_Y, int width, int height, float lower_ang, float upper_ang, float lower_mag, float upper_mag) { const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y; uchar4 temp_angle, temp_magnitude; if (x < width && y < height) { float vector_X = d_vector_X[__mul24(y, width) + x]; float vector_Y = d_vector_Y[__mul24(y, width) + x]; // compute angle and magnitude float angle = atan2f(vector_Y, vector_X); float magnitude = vector_X * vector_X + vector_Y * vector_Y; magnitude = sqrtf(magnitude); // first draw unmatched pixels in white if (!isfinite(magnitude)) { temp_angle.x = 255; temp_angle.y = 255; temp_angle.z = 255; temp_angle.w = 255; temp_magnitude.x = 255; temp_magnitude.y = 255; temp_magnitude.z = 255; temp_magnitude.w = 255; } else { // rescale angle and magnitude from [lower,upper] to [0,1] and convert to // RGBA jet colorspace angle -= lower_ang; angle /= (upper_ang - lower_ang); float r = 1.0f; float g = 1.0f; float b = 1.0f; if (angle < 0.25f) { r = 0; g = 4.0f * angle; } else if (angle < 0.5f) { r = 0; b = 1.0 + 4.0f * (0.25f - angle); } else if (angle < 0.75f) { r = 4.0f * (angle - 0.5f); b = 0; } else { g = 1.0f + 4.0f * (0.75f - angle); b = 0; } temp_angle.x = 255.0 * r; temp_angle.y = 255.0 * g; temp_angle.z = 255.0 * b; temp_angle.w = 255; magnitude -= lower_mag; magnitude /= (upper_mag - lower_mag); r = 1.0f; g = 1.0f; b = 1.0f; if (magnitude < 0.25f) { r = 0; g = 4.0f * magnitude; } else if (magnitude < 0.5f) { r = 0; b = 1.0 + 4.0f * (0.25f - magnitude); } else if (magnitude < 0.75f) { r = 4.0f * (magnitude - 0.5f); b = 0; } else { g = 1.0f + 4.0f * (0.75f - magnitude); b = 0; } temp_magnitude.x = 255.0 * r; temp_magnitude.y = 255.0 * g; temp_magnitude.z = 255.0 * b; temp_magnitude.w = 255; } d_angle_image[__mul24(y, width) + x] = temp_angle; d_magnitude_image[__mul24(y, width) + x] = temp_magnitude; } }
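The pair above ships only the per-pixel kernel. A host-side launcher rounds the grid up with a ceiling division so a width by height image is fully covered, and the bound check inside the kernel handles the padding threads. A minimal sketch; launchConvert, the 16x16 block shape, and the magnitude window are illustrative choices rather than anything taken from the file (atan2f does bound the angle to [-pi, pi]):

void launchConvert(uchar4 *d_angle, uchar4 *d_magnitude,
                   float *d_vx, float *d_vy, int width, int height)
{
    dim3 block(16, 16);
    dim3 grid((width  + block.x - 1) / block.x,   // ceil(width  / 16)
              (height + block.y - 1) / block.y);  // ceil(height / 16)
    convert2DVectorToAngleMagnitude_kernel<<<grid, block>>>(
        d_angle, d_magnitude, d_vx, d_vy, width, height,
        -3.14159265f, 3.14159265f,   // atan2f output spans [-pi, pi]
        0.0f, 10.0f);                // magnitude window: an arbitrary example
}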
89301c46cf01b4c58b41e7d5d0be700e55e11e89.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "MultinomialNBLearnKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *feature_probs = NULL; hipMalloc(&feature_probs, sizeof(float)*XSIZE*YSIZE); float *class_priors = NULL; hipMalloc(&class_priors, sizeof(float)*XSIZE*YSIZE); const float *d_row_sums = NULL; hipMalloc(&d_row_sums, sizeof(float)*XSIZE*YSIZE); unsigned int n_samples_ = 1; unsigned int n_classes_ = 1; unsigned int n_features_ = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( MultinomialNBLearnKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, feature_probs,class_priors,d_row_sums,n_samples_,n_classes_,n_features_); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( MultinomialNBLearnKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, feature_probs,class_priors,d_row_sums,n_samples_,n_classes_,n_features_); } hipDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( MultinomialNBLearnKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, feature_probs,class_priors,d_row_sums,n_samples_,n_classes_,n_features_); } hipDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
89301c46cf01b4c58b41e7d5d0be700e55e11e89.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "MultinomialNBLearnKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *feature_probs = NULL; cudaMalloc(&feature_probs, sizeof(float)*XSIZE*YSIZE); float *class_priors = NULL; cudaMalloc(&class_priors, sizeof(float)*XSIZE*YSIZE); const float *d_row_sums = NULL; cudaMalloc(&d_row_sums, sizeof(float)*XSIZE*YSIZE); unsigned int n_samples_ = 1; unsigned int n_classes_ = 1; unsigned int n_features_ = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); MultinomialNBLearnKernel<<<gridBlock,threadBlock>>>(feature_probs,class_priors,d_row_sums,n_samples_,n_classes_,n_features_); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { MultinomialNBLearnKernel<<<gridBlock,threadBlock>>>(feature_probs,class_priors,d_row_sums,n_samples_,n_classes_,n_features_); } cudaDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { MultinomialNBLearnKernel<<<gridBlock,threadBlock>>>(feature_probs,class_priors,d_row_sums,n_samples_,n_classes_,n_features_); } cudaDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
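As in the ge_erfc_inv harness, the allocations above carry a sizeof(float) factor because cudaMalloc and hipMalloc take a byte count, never an element count, and the timed loop is bracketed by device synchronization. The allocation pattern is worth isolating; a sketch with a hypothetical checked wrapper:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// hypothetical helper: allocate n elements of T on the device, abort on failure
template <typename T>
T *deviceAlloc(size_t n)
{
    T *ptr = NULL;
    cudaError_t err = cudaMalloc(&ptr, n * sizeof(T));  // size is in bytes
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        exit(1);
    }
    return ptr;
}

// usage: float *feature_probs = deviceAlloc<float>(XSIZE * YSIZE);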
93a47beb7b00a0371c63ed2f0a054a6879181c48.hip
// !!! This is a file automatically generated by hipify!!! //#include <opencv2/gpu/gpu.hpp> #include <opencv2/opencv.hpp> #include <opencv2/core/version.hpp> #include <opencv2/imgproc/imgproc.hpp> using namespace cv; #if CV_VERSION_EPOCH == 2 #define OPENCV2 #include <opencv2/gpu/gpu.hpp> namespace GPU = cv::gpu; #elif CV_VERSION_MAJOR == 4 #define OPENCV4 #include <opencv2/core/cuda.hpp> namespace GPU = cv::cuda; #endif #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <opencv2/imgproc.hpp> #include <iostream> #define THREAD_X 32 #define THREAD_Y 32 #define WRAP_NUM 32 #define MAX_WRAP_NUM 32 //using namespace cv; //using namespace cv; __constant__ double guass_kernel[2048]; __constant__ double guass_kernel_x[128*2]; __constant__ double guass_kernel_y[128]; int KERNEL_SIZE; __global__ void convolution(GPU::PtrStepSz<float> src,/*const double* __restrict__ guass_kernel,*/GPU::PtrStepSz<float> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){ //__shared__ int share_mem[WRAP_SIZE][MAX_WRAP_NUM]; int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; //need to do bound check //printf("pixel %d %d block dim %d %d\n",pixel_i,pixel_j,blockDim.x,blockDim.y); int thread_block_index=threadIdx.x+threadIdx.y*blockDim.x; /*int share_i=thread_block_index%WRAP_NUM; int share_j=thread_block_index/WRAP_NUM;*/ double sum=0; //share_mem[share_i][share_j]=src(pixel_i,pixel_j); //share_mem[threadIdx.x][threadIdx.y]=src(pixel_i,pixel_j).x; //__syncthreads(); //printf("%d %d %d\n",pixel_i,pixel_j,share_mem[pixel_i][pixel_j]); if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){ int start_i=pixel_i-kernel_radius,start_j=pixel_j-kernel_radius; for(int i=0;i<kernel_size;i++){ for(int j=0;j<kernel_size;j++){ int index_i=start_i+i,index_j=start_j+j; //sum+=share_mem[][index_j]*guass_kernel(i,j).x; sum+=src(index_j,index_i)*(float)guass_kernel[j]; } } dst(pixel_j-kernel_radius,pixel_i-kernel_radius)=sum;//sum; } return ; } //not need to padding __global__ void conv_x(GPU::PtrStepSz<uchar3> src,/*const double* __restrict__ guass_kernel,*/GPU::PtrStepSz<uchar3> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){ __shared__ float share_mem[100][100]; /*int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; int shared_i=threadIdx.x+kernel_size/2; int shared_j=threadIdx.y; float sum=0; if(!(pixel_i>=orign_width || pixel_j>=orign_height)){ share_mem[shared_j][shared_i]=src(pixel_j,pixel_i); __syncthreads(); int start_i=shared_i-kernel_radius,start_j=shared_j; for(int i=0;i<kernel_size;i++){ sum+=share_mem[start_j][start_i+i]*(float)guass_kernel_x[i]; } dst(pixel_j,pixel_i)=sum;//src(pixel_j,pixel_i);//sum;//sum; }*/ int left_limit=kernel_radius,right_limit=blockDim.x-kernel_radius; int pixel_i=blockDim.x*blockIdx.x+threadIdx.x-2*blockIdx.x*kernel_radius; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; int thread_block_index=threadIdx.x+threadIdx.y*blockDim.x; // share_mem[thread_block_index%32][thread_block_index/32]=src(pixel_j,pixel_i); //share_mem[10]=src(pixel_j,pixel_i); __syncthreads(); float sum=0,sum1=0,sum2=0; if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){//real image size if(threadIdx.x>= left_limit && threadIdx.x<right_limit){ //non padding size int 
x=threadIdx.x-kernel_radius,y=threadIdx.y; for(int i=0;i<kernel_size;i++){ thread_block_index=(x+i)+y*blockDim.x; // if(thread_block_index>=2048 || thread_block_index<0) // printf("%d\n",thread_block_index/WRAP_NUM); sum+=src(pixel_j,pixel_i-kernel_radius+i).x*(float)guass_kernel_x[i]; // sum+=share_mem[thread_block_index%32][thread_block_index/32]*(float)guass_kernel_x[i]; sum1+=src(pixel_j,pixel_i-kernel_radius+i).y*(float)guass_kernel_x[i]; sum2+=src(pixel_j,pixel_i-kernel_radius+i).z*(float)guass_kernel_x[i]; } dst(pixel_j-kernel_radius,pixel_i-kernel_radius).x=sum;//src(pixel_j,pixel_i); dst(pixel_j-kernel_radius,pixel_i-kernel_radius).y=sum1; dst(pixel_j-kernel_radius,pixel_i-kernel_radius).z=sum2; } //dst(pixel_j,pixel_i)=sum; } //dst(pixel_j,pixel_i)=sum; return ; } __global__ void conv_y(GPU::PtrStepSz<uchar3> src,/*const double* __restrict__ guass_kernel,*/GPU::PtrStepSz<uchar3> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){ __shared__ float share_mem[100][100]; /*int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; int shared_i=threadIdx.x; int shared_j=threadIdx.y+kernel_size/2; float sum=0; if(!(pixel_i>=orign_width || pixel_j>=orign_height)){ share_mem[shared_j][shared_i]=src(pixel_j,pixel_i); __syncthreads(); int start_i=shared_i, start_j=shared_j-kernel_radius; for(int i=0;i<kernel_size;i++){ sum+=share_mem[start_j+i][start_i]*(float)guass_kernel_x[i]; //sum+=share_mem[start_j+i][start_i]; } dst(pixel_j,pixel_i)=sum;//share_mem[shared_j][shared_i];//sum; }*/ int top_limit=kernel_radius,down_limit=blockDim.y-kernel_radius; int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y-2*blockIdx.y*kernel_radius; int thread_block_index=threadIdx.x+threadIdx.y*blockDim.x; // share_mem[thread_block_index%32][thread_block_index/32]=src(pixel_j,pixel_i); __syncthreads(); float sum=0.0,sum1=0,sum2=0; if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){ if(threadIdx.y>= top_limit && threadIdx.y<down_limit){ int x=threadIdx.x,y=threadIdx.y-kernel_radius; for(int i=0;i<kernel_size;i++){ thread_block_index=x+(y+i)*blockDim.x; // sum+=share_mem[thread_block_index%32][thread_block_index/32]*(float)guass_kernel_x[i]; sum+=src(pixel_j-kernel_radius+i,pixel_i).x*(float)guass_kernel_x[i]; sum1+=/*share_mem[thread_block_index%WRAP_NUM][thread_block_index/WRAP_NUM]*/src(pixel_j-kernel_radius+i,pixel_i).y*(float)guass_kernel_x[i]; sum2+=src(pixel_j-kernel_radius+i,pixel_i).z*(float)guass_kernel_x[i]; } dst(pixel_j-kernel_radius,pixel_i-kernel_radius).x=sum;//src(pixel_j,pixel_i);//sum; dst(pixel_j-kernel_radius,pixel_i-kernel_radius).y=sum1; dst(pixel_j-kernel_radius,pixel_i-kernel_radius).z=sum2; } } //dst(pixel_j,pixel_i)=sum; return ; } void guassain_conv(const Mat *src,Mat *dst,double sigma){ // int depth = CV_MAT_DEPTH(src.type()); KERNEL_SIZE = cvRound(sigma* 4 * 2 + 1)|1; std::cout<<KERNEL_SIZE<<std::endl; int kernel_radius=KERNEL_SIZE/2; int orign_width=src->cols,orign_height=src->rows; Mat padding_image; GPU::GpuMat device_image,g_kernel,result, dev_image,resul; if(GPU::getCudaEnabledDeviceCount()==0){ std::cout<<"not use GPU module"<<std::endl; return ; } Mat gauss_x=getGaussianKernel(KERNEL_SIZE,sigma),gauss_y=getGaussianKernel(KERNEL_SIZE,sigma); //3*3 filter //Mat gauss_kernel=gauss_x*gauss_y.t(); //allocate /*double* gs_kernel,*dev_kernel; 
hipHostMalloc(&gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE,hipHostMallocDefault); for(int i=0;i<KERNEL_SIZE;i++){ double* row=gauss_kernel.ptr<double>(i); for(int j=0;j<KERNEL_SIZE;j++){ gs_kernel[i*KERNEL_SIZE+j]=row[j]; } }*/ //hipMalloc(&dev_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE); //allocate //allocate double* x,*y; hipHostMalloc(&x,sizeof(double)*KERNEL_SIZE*2,hipHostMallocDefault); double *row_x=gauss_x.ptr<double>(0),*row_y=gauss_y.ptr<double>(0); for(int i=0;i<KERNEL_SIZE*2;i++){ if(i<KERNEL_SIZE){ x[i]=row_x[i]; //std::cout<<x[i]<<std::endl; } else x[i]=row_y[i-KERNEL_SIZE]; } //hipHostMalloc(&y,sizeof(double)*KERNEL_SIZE,hipHostMallocDefault); //allocate copyMakeBorder(*src,padding_image,kernel_radius,kernel_radius,kernel_radius,kernel_radius,BORDER_CONSTANT, 0); int orign_grid_num_x=(src->cols+THREAD_X-1)/THREAD_X,orign_grid_num_y=(src->rows+THREAD_Y-1)/THREAD_Y; int grid_num_x=orign_grid_num_x+(2*kernel_radius*orign_grid_num_x+THREAD_X-1)/THREAD_X,grid_num_y=orign_grid_num_y+(2*kernel_radius*orign_grid_num_y+THREAD_Y-1)/THREAD_Y; //int grid_num_x=(src->cols+THREAD_X-1)/THREAD_X,grid_num_y=(src->rows+THREAD_Y-1)/THREAD_Y; result.upload(*dst); //g_kernel.upload(gauss_kernel); //use seperate do no padding //device_image.upload(padding_image); device_image.upload(padding_image); //device_image.upload(*src); hipMemcpyToSymbol(guass_kernel_x,x,sizeof(double)*2*KERNEL_SIZE); //hipMemcpyToSymbol(guass_kernel,gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE); dim3 thread_block(THREAD_X,THREAD_Y); dim3 grid(grid_num_x,grid_num_y); //convolution<<<grid,thread_block>>>(device_image,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height); hipLaunchKernelGGL(( conv_x), dim3(grid),dim3(thread_block), 0, 0, device_image,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height); //hipDeviceSynchronize(); Mat re; result.download(re); copyMakeBorder(re,padding_image,kernel_radius,kernel_radius,kernel_radius,kernel_radius,BORDER_CONSTANT, 0); //resul.upload(re); device_image.upload(padding_image); hipLaunchKernelGGL(( conv_y), dim3(grid),dim3(thread_block), 0, 0, device_image,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height); result.download(*dst); return ; }
93a47beb7b00a0371c63ed2f0a054a6879181c48.cu
//#include <opencv2/gpu/gpu.hpp> #include <opencv2/opencv.hpp> #include <opencv2/core/version.hpp> #include <opencv2/imgproc/imgproc.hpp> using namespace cv; #if CV_VERSION_EPOCH == 2 #define OPENCV2 #include <opencv2/gpu/gpu.hpp> namespace GPU = cv::gpu; #elif CV_VERSION_MAJOR == 4 #define OPENCV4 #include <opencv2/core/cuda.hpp> namespace GPU = cv::cuda; #endif #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <opencv2/imgproc.hpp> #include <iostream> #define THREAD_X 32 #define THREAD_Y 32 #define WRAP_NUM 32 #define MAX_WRAP_NUM 32 //using namespace cv; //using namespace cv; __constant__ double guass_kernel[2048]; __constant__ double guass_kernel_x[128*2]; __constant__ double guass_kernel_y[128]; int KERNEL_SIZE; __global__ void convolution(GPU::PtrStepSz<float> src,/*const double* __restrict__ guass_kernel,*/GPU::PtrStepSz<float> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){ //__shared__ int share_mem[WRAP_SIZE][MAX_WRAP_NUM]; int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; //need to do bound check //printf("pixel %d %d block dim %d %d\n",pixel_i,pixel_j,blockDim.x,blockDim.y); int thread_block_index=threadIdx.x+threadIdx.y*blockDim.x; /*int share_i=thread_block_index%WRAP_NUM; int share_j=thread_block_index/WRAP_NUM;*/ double sum=0; //share_mem[share_i][share_j]=src(pixel_i,pixel_j); //share_mem[threadIdx.x][threadIdx.y]=src(pixel_i,pixel_j).x; //__syncthreads(); //printf("%d %d %d\n",pixel_i,pixel_j,share_mem[pixel_i][pixel_j]); if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){ int start_i=pixel_i-kernel_radius,start_j=pixel_j-kernel_radius; for(int i=0;i<kernel_size;i++){ for(int j=0;j<kernel_size;j++){ int index_i=start_i+i,index_j=start_j+j; //sum+=share_mem[][index_j]*guass_kernel(i,j).x; sum+=src(index_j,index_i)*(float)guass_kernel[j]; } } dst(pixel_j-kernel_radius,pixel_i-kernel_radius)=sum;//sum; } return ; } //not need to padding __global__ void conv_x(GPU::PtrStepSz<uchar3> src,/*const double* __restrict__ guass_kernel,*/GPU::PtrStepSz<uchar3> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){ __shared__ float share_mem[100][100]; /*int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; int shared_i=threadIdx.x+kernel_size/2; int shared_j=threadIdx.y; float sum=0; if(!(pixel_i>=orign_width || pixel_j>=orign_height)){ share_mem[shared_j][shared_i]=src(pixel_j,pixel_i); __syncthreads(); int start_i=shared_i-kernel_radius,start_j=shared_j; for(int i=0;i<kernel_size;i++){ sum+=share_mem[start_j][start_i+i]*(float)guass_kernel_x[i]; } dst(pixel_j,pixel_i)=sum;//src(pixel_j,pixel_i);//sum;//sum; }*/ int left_limit=kernel_radius,right_limit=blockDim.x-kernel_radius; int pixel_i=blockDim.x*blockIdx.x+threadIdx.x-2*blockIdx.x*kernel_radius; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; int thread_block_index=threadIdx.x+threadIdx.y*blockDim.x; // share_mem[thread_block_index%32][thread_block_index/32]=src(pixel_j,pixel_i); //share_mem[10]=src(pixel_j,pixel_i); __syncthreads(); float sum=0,sum1=0,sum2=0; if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){//real image size if(threadIdx.x>= left_limit && threadIdx.x<right_limit){ //non padding size int x=threadIdx.x-kernel_radius,y=threadIdx.y; for(int i=0;i<kernel_size;i++){ 
thread_block_index=(x+i)+y*blockDim.x; // if(thread_block_index>=2048 || thread_block_index<0) // printf("%d\n",thread_block_index/WRAP_NUM); sum+=src(pixel_j,pixel_i-kernel_radius+i).x*(float)guass_kernel_x[i]; // sum+=share_mem[thread_block_index%32][thread_block_index/32]*(float)guass_kernel_x[i]; sum1+=src(pixel_j,pixel_i-kernel_radius+i).y*(float)guass_kernel_x[i]; sum2+=src(pixel_j,pixel_i-kernel_radius+i).z*(float)guass_kernel_x[i]; } dst(pixel_j-kernel_radius,pixel_i-kernel_radius).x=sum;//src(pixel_j,pixel_i); dst(pixel_j-kernel_radius,pixel_i-kernel_radius).y=sum1; dst(pixel_j-kernel_radius,pixel_i-kernel_radius).z=sum2; } //dst(pixel_j,pixel_i)=sum; } //dst(pixel_j,pixel_i)=sum; return ; } __global__ void conv_y(GPU::PtrStepSz<uchar3> src,/*const double* __restrict__ guass_kernel,*/GPU::PtrStepSz<uchar3> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){ __shared__ float share_mem[100][100]; /*int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y; int shared_i=threadIdx.x; int shared_j=threadIdx.y+kernel_size/2; float sum=0; if(!(pixel_i>=orign_width || pixel_j>=orign_height)){ share_mem[shared_j][shared_i]=src(pixel_j,pixel_i); __syncthreads(); int start_i=shared_i, start_j=shared_j-kernel_radius; for(int i=0;i<kernel_size;i++){ sum+=share_mem[start_j+i][start_i]*(float)guass_kernel_x[i]; //sum+=share_mem[start_j+i][start_i]; } dst(pixel_j,pixel_i)=sum;//share_mem[shared_j][shared_i];//sum; }*/ int top_limit=kernel_radius,down_limit=blockDim.y-kernel_radius; int pixel_i=blockDim.x*blockIdx.x+threadIdx.x; int pixel_j=blockDim.y*blockIdx.y+threadIdx.y-2*blockIdx.y*kernel_radius; int thread_block_index=threadIdx.x+threadIdx.y*blockDim.x; // share_mem[thread_block_index%32][thread_block_index/32]=src(pixel_j,pixel_i); __syncthreads(); float sum=0.0,sum1=0,sum2=0; if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){ if(threadIdx.y>= top_limit && threadIdx.y<down_limit){ int x=threadIdx.x,y=threadIdx.y-kernel_radius; for(int i=0;i<kernel_size;i++){ thread_block_index=x+(y+i)*blockDim.x; // sum+=share_mem[thread_block_index%32][thread_block_index/32]*(float)guass_kernel_x[i]; sum+=src(pixel_j-kernel_radius+i,pixel_i).x*(float)guass_kernel_x[i]; sum1+=/*share_mem[thread_block_index%WRAP_NUM][thread_block_index/WRAP_NUM]*/src(pixel_j-kernel_radius+i,pixel_i).y*(float)guass_kernel_x[i]; sum2+=src(pixel_j-kernel_radius+i,pixel_i).z*(float)guass_kernel_x[i]; } dst(pixel_j-kernel_radius,pixel_i-kernel_radius).x=sum;//src(pixel_j,pixel_i);//sum; dst(pixel_j-kernel_radius,pixel_i-kernel_radius).y=sum1; dst(pixel_j-kernel_radius,pixel_i-kernel_radius).z=sum2; } } //dst(pixel_j,pixel_i)=sum; return ; } void guassain_conv(const Mat *src,Mat *dst,double sigma){ // int depth = CV_MAT_DEPTH(src.type()); KERNEL_SIZE = cvRound(sigma* 4 * 2 + 1)|1; std::cout<<KERNEL_SIZE<<std::endl; int kernel_radius=KERNEL_SIZE/2; int orign_width=src->cols,orign_height=src->rows; Mat padding_image; GPU::GpuMat device_image,g_kernel,result, dev_image,resul; if(GPU::getCudaEnabledDeviceCount()==0){ std::cout<<"not use GPU module"<<std::endl; return ; } Mat gauss_x=getGaussianKernel(KERNEL_SIZE,sigma),gauss_y=getGaussianKernel(KERNEL_SIZE,sigma); //3*3 filter //Mat gauss_kernel=gauss_x*gauss_y.t(); //allocate /*double* gs_kernel,*dev_kernel; cudaHostAlloc(&gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE,cudaHostAllocDefault); for(int i=0;i<KERNEL_SIZE;i++){ double* 
row=gauss_kernel.ptr<double>(i); for(int j=0;j<KERNEL_SIZE;j++){ gs_kernel[i*KERNEL_SIZE+j]=row[j]; } }*/ //cudaMalloc(&dev_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE); //allocate //allocate double* x,*y; cudaHostAlloc(&x,sizeof(double)*KERNEL_SIZE*2,cudaHostAllocDefault); double *row_x=gauss_x.ptr<double>(0),*row_y=gauss_y.ptr<double>(0); for(int i=0;i<KERNEL_SIZE*2;i++){ if(i<KERNEL_SIZE){ x[i]=row_x[i]; //std::cout<<x[i]<<std::endl; } else x[i]=row_y[i-KERNEL_SIZE]; } //cudaHostAlloc(&y,sizeof(double)*KERNEL_SIZE,cudaHostAllocDefault); //allocate copyMakeBorder(*src,padding_image,kernel_radius,kernel_radius,kernel_radius,kernel_radius,BORDER_CONSTANT, 0); int orign_grid_num_x=(src->cols+THREAD_X-1)/THREAD_X,orign_grid_num_y=(src->rows+THREAD_Y-1)/THREAD_Y; int grid_num_x=orign_grid_num_x+(2*kernel_radius*orign_grid_num_x+THREAD_X-1)/THREAD_X,grid_num_y=orign_grid_num_y+(2*kernel_radius*orign_grid_num_y+THREAD_Y-1)/THREAD_Y; //int grid_num_x=(src->cols+THREAD_X-1)/THREAD_X,grid_num_y=(src->rows+THREAD_Y-1)/THREAD_Y; result.upload(*dst); //g_kernel.upload(gauss_kernel); //use seperate do no padding //device_image.upload(padding_image); device_image.upload(padding_image); //device_image.upload(*src); cudaMemcpyToSymbol(guass_kernel_x,x,sizeof(double)*2*KERNEL_SIZE); //cudaMemcpyToSymbol(guass_kernel,gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE); dim3 thread_block(THREAD_X,THREAD_Y); dim3 grid(grid_num_x,grid_num_y); //convolution<<<grid,thread_block>>>(device_image,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height); conv_x<<<grid,thread_block>>>(device_image,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height); //cudaDeviceSynchronize(); Mat re; result.download(re); copyMakeBorder(re,padding_image,kernel_radius,kernel_radius,kernel_radius,kernel_radius,BORDER_CONSTANT, 0); //resul.upload(re); device_image.upload(padding_image); conv_y<<<grid,thread_block>>>(device_image,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height); result.download(*dst); return ; }
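Both convolution passes above read the same 1D weights from __constant__ memory; for a positive sigma, cv::getGaussianKernel returns the sampled Gaussian normalized to sum to 1. A dependency-free host-side sketch of computing those weights (kernel_size is odd by construction, via the |1 in the files above):

#include <cmath>
#include <vector>

// sampled, normalized 1D Gaussian; matches cv::getGaussianKernel for sigma > 0
std::vector<double> gaussian1D(int ksize, double sigma)
{
    std::vector<double> w(ksize);
    const int r = ksize / 2;
    double sum = 0.0;
    for (int i = 0; i < ksize; i++) {
        const double d = i - r;
        w[i] = std::exp(-(d * d) / (2.0 * sigma * sigma));
        sum += w[i];
    }
    for (int i = 0; i < ksize; i++) {
        w[i] /= sum;   // weights sum to 1 so overall brightness is preserved
    }
    return w;
}

Because the filter is separable, the horizontal and vertical passes each consume this same vector, which is why the files upload only gauss_x (duplicated into guass_kernel_x) rather than a full ksize-by-ksize matrix.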
6e628f7280a444bac4ad6c65bf1c4e6898ddb6fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "rbbox_overlaps.hpp" #include "cuda_ellipse_overlaps.cuh" #include <vector> #include <iostream> #include <cmath> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; ////////////////////////////////////////////// //////////////////////////// __device__ inline float trangle_area(float * a, float * b, float * c) { return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0]))/2.0; } __device__ inline float area(float * int_pts, int num_of_inter) { float area = 0.0; for(int i = 0;i < num_of_inter - 2;i++) { area += fabs(trangle_area(int_pts, int_pts + 2 * i + 2, int_pts + 2 * i + 4)); } return area; } __device__ inline void reorder_pts(float * int_pts, int num_of_inter) { if(num_of_inter > 0) { float center[2]; center[0] = 0.0; center[1] = 0.0; for(int i = 0;i < num_of_inter;i++) { center[0] += int_pts[2 * i]; center[1] += int_pts[2 * i + 1]; } center[0] /= num_of_inter; center[1] /= num_of_inter; float vs[16]; float v[2]; float d; for(int i = 0;i < num_of_inter;i++) { v[0] = int_pts[2 * i]-center[0]; v[1] = int_pts[2 * i + 1]-center[1]; d = sqrt(v[0] * v[0] + v[1] * v[1]); v[0] = v[0] / d; v[1] = v[1] / d; if(v[1] < 0) { v[0]= - 2 - v[0]; } vs[i] = v[0]; } float temp,tx,ty; int j; for(int i=1;i<num_of_inter;++i){ if(vs[i-1]>vs[i]){ temp = vs[i]; tx = int_pts[2*i]; ty = int_pts[2*i+1]; j=i; while(j>0&&vs[j-1]>temp){ vs[j] = vs[j-1]; int_pts[j*2] = int_pts[j*2-2]; int_pts[j*2+1] = int_pts[j*2-1]; j--; } vs[j] = temp; int_pts[j*2] = tx; int_pts[j*2+1] = ty; } } } } __device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) { float a[2]; float b[2]; float c[2]; float d[2]; float area_abc, area_abd, area_cda, area_cdb; a[0] = pts1[2 * i]; a[1] = pts1[2 * i + 1]; b[0] = pts1[2 * ((i + 1) % 4)]; b[1] = pts1[2 * ((i + 1) % 4) + 1]; c[0] = pts2[2 * j]; c[1] = pts2[2 * j + 1]; d[0] = pts2[2 * ((j + 1) % 4)]; d[1] = pts2[2 * ((j + 1) % 4) + 1]; area_abc = trangle_area(a, b, c); area_abd = trangle_area(a, b, d); if(area_abc * area_abd >= -1e-5) { return false; } area_cda = trangle_area(c, d, a); area_cdb = area_cda + area_abc - area_abd; if (area_cda * area_cdb >= -1e-5) { return false; } float t = area_cda / (area_abd - area_abc); float dx = t * (b[0] - a[0]); float dy = t * (b[1] - a[1]); temp_pts[0] = a[0] + dx; temp_pts[1] = a[1] + dy; return true; } __device__ inline bool inrect(float pt_x, float pt_y, float * pts) { double ab[2]; double ad[2]; double ap[2]; double abab; double abap; double adad; double adap; ab[0] = pts[2] - pts[0]; ab[1] = pts[3] - pts[1]; ad[0] = pts[6] - pts[0]; ad[1] = pts[7] - pts[1]; ap[0] = pt_x - pts[0]; ap[1] = pt_y - pts[1]; abab = ab[0] * ab[0] + ab[1] * ab[1]; abap = ab[0] * ap[0] + ab[1] * ap[1]; adad = ad[0] * ad[0] + ad[1] * ad[1]; adap = ad[0] * ap[0] + ad[1] * ap[1]; bool result = (abab - abap >= -1) and (abap >= -1) and (adad - adap >= -1) and (adap >= -1); return result; } __device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) { int num_of_inter = 0; for(int i = 0;i < 4;i++) { if(inrect(pts1[2 * i], pts1[2 * i + 1], pts2)) { int_pts[num_of_inter * 2] = pts1[2 * i]; int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]; 
num_of_inter++; } if(inrect(pts2[2 * i], pts2[2 * i + 1], pts1)) { int_pts[num_of_inter * 2] = pts2[2 * i]; int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]; num_of_inter++; } } float temp_pts[2]; for(int i = 0;i < 4;i++) { for(int j = 0;j < 4;j++) { bool has_pts = inter2line(pts1, pts2, i, j, temp_pts); if(has_pts) { int_pts[num_of_inter * 2] = temp_pts[0]; int_pts[num_of_inter * 2 + 1] = temp_pts[1]; num_of_inter++; } } } return num_of_inter; } __device__ inline void convert_region(float * pts , float const * const region) { float angle = region[4]; float a_cos = cos(angle/180.0*3.1415926535); float a_sin = -sin(angle/180.0*3.1415926535);// anti clock-wise float ctr_x = region[0]; float ctr_y = region[1]; float h = region[2]; float w = region[3]; float pts_x[4]; float pts_y[4]; pts_x[0] = - w / 2; pts_x[1] = - w / 2; pts_x[2] = w / 2; pts_x[3] = w / 2; pts_y[0] = - h / 2; pts_y[1] = h / 2; pts_y[2] = h / 2; pts_y[3] = - h / 2; for(int i = 0;i < 4;i++) { pts[2 * i] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x; pts[2 * i + 1] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y; } } __device__ inline float inter(float const * const region1, float const * const region2) { float pts1[8]; float pts2[8]; float int_pts[16]; int num_of_inter; convert_region(pts1, region1); convert_region(pts2, region2); num_of_inter = inter_pts(pts1, pts2, int_pts); reorder_pts(int_pts, num_of_inter); return area(int_pts, num_of_inter); } __device__ inline float devRotateIoU(float const * const region1, float const * const region2) { if((fabs(region1[0] - region2[0]) < 1e-5) && (fabs(region1[1] - region2[1]) < 1e-5) && (fabs(region1[2] - region2[2]) < 1e-5) && (fabs(region1[3] - region2[3]) < 1e-5) && (fabs(region1[4] - region2[4]) < 1e-5)) { return 1.0; } double elp1[5], elp2[5], result; for(int i=0;i<5;i++) { elp1[i] = region1[i]; elp2[i] = region2[i]; } CalculateOverlap(elp1, elp2, &result); /* float area1 = region1[2] * region1[3]; float area2 = region2[2] * region2[3]; float area_inter = inter(region1, region2); float result = area_inter / (area1 + area2 - area_inter); */ if(result < 0) { result = 0.0; } return float(result); } __global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes, const float * dev_query_boxes, float* dev_overlaps) { const int col_start = blockIdx.y; const int row_start = blockIdx.x; const int row_size = min(N - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(K - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; __shared__ float block_query_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_query_boxes[threadIdx.x * 5 + 0] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_query_boxes[threadIdx.x * 5 + 1] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_query_boxes[threadIdx.x * 5 + 2] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_query_boxes[threadIdx.x * 5 + 3] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_query_boxes[threadIdx.x * 5 + 4] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } if (threadIdx.x < row_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = 
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { for(int i = 0;i < col_size; i++) { int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ; dev_overlaps[offset] = devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5); } } } void _set_device(int device_id) { int current_device; CUDA_CHECK(hipGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to hipSetDevice must come before any calls to Get, which // may perform initialization using the GPU. CUDA_CHECK(hipSetDevice(device_id)); } void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) { _set_device(device_id); float* overlaps_dev = NULL; float* boxes_dev = NULL; float* query_boxes_dev = NULL; CUDA_CHECK(hipMalloc(&boxes_dev, n * 5 * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes, n * 5 * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&query_boxes_dev, k * 5 * sizeof(float))); CUDA_CHECK(hipMemcpy(query_boxes_dev, query_boxes, k * 5 * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&overlaps_dev, n * k * sizeof(float))); dim3 blocks(DIVUP(n, threadsPerBlock), DIVUP(k, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( overlaps_kernel), dim3(blocks), dim3(threads), 0, 0, n, k, boxes_dev, query_boxes_dev, overlaps_dev); CUDA_CHECK(hipMemcpy(overlaps, overlaps_dev, n * k * sizeof(float), hipMemcpyDeviceToHost)); CUDA_CHECK(hipFree(overlaps_dev)); CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(query_boxes_dev)); }
6e628f7280a444bac4ad6c65bf1c4e6898ddb6fc.cu
#include "rbbox_overlaps.hpp" #include "cuda_ellipse_overlaps.cuh" #include <vector> #include <iostream> #include <cmath> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; ////////////////////////////////////////////// //////////////////////////// __device__ inline float trangle_area(float * a, float * b, float * c) { return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0]))/2.0; } __device__ inline float area(float * int_pts, int num_of_inter) { float area = 0.0; for(int i = 0;i < num_of_inter - 2;i++) { area += fabs(trangle_area(int_pts, int_pts + 2 * i + 2, int_pts + 2 * i + 4)); } return area; } __device__ inline void reorder_pts(float * int_pts, int num_of_inter) { if(num_of_inter > 0) { float center[2]; center[0] = 0.0; center[1] = 0.0; for(int i = 0;i < num_of_inter;i++) { center[0] += int_pts[2 * i]; center[1] += int_pts[2 * i + 1]; } center[0] /= num_of_inter; center[1] /= num_of_inter; float vs[16]; float v[2]; float d; for(int i = 0;i < num_of_inter;i++) { v[0] = int_pts[2 * i]-center[0]; v[1] = int_pts[2 * i + 1]-center[1]; d = sqrt(v[0] * v[0] + v[1] * v[1]); v[0] = v[0] / d; v[1] = v[1] / d; if(v[1] < 0) { v[0]= - 2 - v[0]; } vs[i] = v[0]; } float temp,tx,ty; int j; for(int i=1;i<num_of_inter;++i){ if(vs[i-1]>vs[i]){ temp = vs[i]; tx = int_pts[2*i]; ty = int_pts[2*i+1]; j=i; while(j>0&&vs[j-1]>temp){ vs[j] = vs[j-1]; int_pts[j*2] = int_pts[j*2-2]; int_pts[j*2+1] = int_pts[j*2-1]; j--; } vs[j] = temp; int_pts[j*2] = tx; int_pts[j*2+1] = ty; } } } } __device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) { float a[2]; float b[2]; float c[2]; float d[2]; float area_abc, area_abd, area_cda, area_cdb; a[0] = pts1[2 * i]; a[1] = pts1[2 * i + 1]; b[0] = pts1[2 * ((i + 1) % 4)]; b[1] = pts1[2 * ((i + 1) % 4) + 1]; c[0] = pts2[2 * j]; c[1] = pts2[2 * j + 1]; d[0] = pts2[2 * ((j + 1) % 4)]; d[1] = pts2[2 * ((j + 1) % 4) + 1]; area_abc = trangle_area(a, b, c); area_abd = trangle_area(a, b, d); if(area_abc * area_abd >= -1e-5) { return false; } area_cda = trangle_area(c, d, a); area_cdb = area_cda + area_abc - area_abd; if (area_cda * area_cdb >= -1e-5) { return false; } float t = area_cda / (area_abd - area_abc); float dx = t * (b[0] - a[0]); float dy = t * (b[1] - a[1]); temp_pts[0] = a[0] + dx; temp_pts[1] = a[1] + dy; return true; } __device__ inline bool inrect(float pt_x, float pt_y, float * pts) { double ab[2]; double ad[2]; double ap[2]; double abab; double abap; double adad; double adap; ab[0] = pts[2] - pts[0]; ab[1] = pts[3] - pts[1]; ad[0] = pts[6] - pts[0]; ad[1] = pts[7] - pts[1]; ap[0] = pt_x - pts[0]; ap[1] = pt_y - pts[1]; abab = ab[0] * ab[0] + ab[1] * ab[1]; abap = ab[0] * ap[0] + ab[1] * ap[1]; adad = ad[0] * ad[0] + ad[1] * ad[1]; adap = ad[0] * ap[0] + ad[1] * ap[1]; bool result = (abab - abap >= -1) and (abap >= -1) and (adad - adap >= -1) and (adap >= -1); return result; } __device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) { int num_of_inter = 0; for(int i = 0;i < 4;i++) { if(inrect(pts1[2 * i], pts1[2 * i + 1], pts2)) { int_pts[num_of_inter * 2] = pts1[2 * i]; int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]; num_of_inter++; } if(inrect(pts2[2 * i], pts2[2 * i + 1], pts1)) { int_pts[num_of_inter * 2] = 
pts2[2 * i]; int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]; num_of_inter++; } } float temp_pts[2]; for(int i = 0;i < 4;i++) { for(int j = 0;j < 4;j++) { bool has_pts = inter2line(pts1, pts2, i, j, temp_pts); if(has_pts) { int_pts[num_of_inter * 2] = temp_pts[0]; int_pts[num_of_inter * 2 + 1] = temp_pts[1]; num_of_inter++; } } } return num_of_inter; } __device__ inline void convert_region(float * pts , float const * const region) { float angle = region[4]; float a_cos = cos(angle/180.0*3.1415926535); float a_sin = -sin(angle/180.0*3.1415926535);// anti clock-wise float ctr_x = region[0]; float ctr_y = region[1]; float h = region[2]; float w = region[3]; float pts_x[4]; float pts_y[4]; pts_x[0] = - w / 2; pts_x[1] = - w / 2; pts_x[2] = w / 2; pts_x[3] = w / 2; pts_y[0] = - h / 2; pts_y[1] = h / 2; pts_y[2] = h / 2; pts_y[3] = - h / 2; for(int i = 0;i < 4;i++) { pts[2 * i] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x; pts[2 * i + 1] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y; } } __device__ inline float inter(float const * const region1, float const * const region2) { float pts1[8]; float pts2[8]; float int_pts[16]; int num_of_inter; convert_region(pts1, region1); convert_region(pts2, region2); num_of_inter = inter_pts(pts1, pts2, int_pts); reorder_pts(int_pts, num_of_inter); return area(int_pts, num_of_inter); } __device__ inline float devRotateIoU(float const * const region1, float const * const region2) { if((fabs(region1[0] - region2[0]) < 1e-5) && (fabs(region1[1] - region2[1]) < 1e-5) && (fabs(region1[2] - region2[2]) < 1e-5) && (fabs(region1[3] - region2[3]) < 1e-5) && (fabs(region1[4] - region2[4]) < 1e-5)) { return 1.0; } double elp1[5], elp2[5], result; for(int i=0;i<5;i++) { elp1[i] = region1[i]; elp2[i] = region2[i]; } CalculateOverlap(elp1, elp2, &result); /* float area1 = region1[2] * region1[3]; float area2 = region2[2] * region2[3]; float area_inter = inter(region1, region2); float result = area_inter / (area1 + area2 - area_inter); */ if(result < 0) { result = 0.0; } return float(result); } __global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes, const float * dev_query_boxes, float* dev_overlaps) { const int col_start = blockIdx.y; const int row_start = blockIdx.x; const int row_size = min(N - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(K - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; __shared__ float block_query_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_query_boxes[threadIdx.x * 5 + 0] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_query_boxes[threadIdx.x * 5 + 1] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_query_boxes[threadIdx.x * 5 + 2] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_query_boxes[threadIdx.x * 5 + 3] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_query_boxes[threadIdx.x * 5 + 4] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } if (threadIdx.x < row_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 
4] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { for(int i = 0;i < col_size; i++) { int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ; dev_overlaps[offset] = devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5); } } } void _set_device(int device_id) { int current_device; CUDA_CHECK(cudaGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to cudaSetDevice must come before any calls to Get, which // may perform initialization using the GPU. CUDA_CHECK(cudaSetDevice(device_id)); } void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) { _set_device(device_id); float* overlaps_dev = NULL; float* boxes_dev = NULL; float* query_boxes_dev = NULL; CUDA_CHECK(cudaMalloc(&boxes_dev, n * 5 * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes, n * 5 * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&query_boxes_dev, k * 5 * sizeof(float))); CUDA_CHECK(cudaMemcpy(query_boxes_dev, query_boxes, k * 5 * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&overlaps_dev, n * k * sizeof(float))); dim3 blocks(DIVUP(n, threadsPerBlock), DIVUP(k, threadsPerBlock)); dim3 threads(threadsPerBlock); overlaps_kernel<<<blocks, threads>>>(n, k, boxes_dev, query_boxes_dev, overlaps_dev); CUDA_CHECK(cudaMemcpy(overlaps, overlaps_dev, n * k * sizeof(float), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaFree(overlaps_dev)); CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(query_boxes_dev)); }
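The pair above ends with the host entry point _overlaps, which copies n and k row-major 5-float boxes to the device, runs the tiled kernel, and copies the n*k IoU matrix back. A minimal driver sketch follows (hypothetical, not part of either dataset row). The (ctr_x, ctr_y, h, w, angle_deg) layout is read off convert_region; only the identical-box value is guaranteed by devRotateIoU's fast path, the remaining entries come from CalculateOverlap in cuda_ellipse_overlaps.cuh.

#include <cstdio>

// Declared in the translation unit above.
void _overlaps(float* overlaps, const float* boxes, const float* query_boxes,
               int n, int k, int device_id);

int main() {
    // Boxes are (ctr_x, ctr_y, h, w, angle_deg), five floats each, row-major.
    const float boxes[2 * 5] = {10.f, 10.f, 4.f, 8.f,  0.f,
                                20.f, 10.f, 4.f, 8.f, 45.f};
    const float query[1 * 5] = {10.f, 10.f, 4.f, 8.f,  0.f};
    float ious[2 * 1];
    _overlaps(ious, boxes, query, /*n=*/2, /*k=*/1, /*device_id=*/0);
    // ious[0] hits the identical-box fast path and is exactly 1.0.
    for (int i = 0; i < 2; ++i) printf("IoU[%d] = %f\n", i, ious[i]);
    return 0;
}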
1ec91b44ed11ac0e46e9cff47f07305a3d38108d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstring> #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/util/im2col.hpp" #include "caffe/vision_layers.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, Dtype* data_col); extern hipDeviceProp_t CAFFE_TEST_CUDA_PROP; template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 10, 10)), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; } Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel<TypeParam>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = 
this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
1ec91b44ed11ac0e46e9cff47f07305a3d38108d.cu
#include <cstring> #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/util/im2col.hpp" #include "caffe/vision_layers.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 10, 10)), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; } Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<TypeParam><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
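The test loop above shrinks grid_dim to default_grid_dim/grid_div and still expects results identical to the CPU path; that only works because im2col_gpu_kernel iterates with a grid-stride loop, so a smaller grid just means more iterations per thread. A minimal, self-contained sketch of the pattern (toy kernel and numbers, not from Caffe):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale2x_kernel(const int n, const float* in, float* out) {
  // Each thread starts at its global id and strides by the total number of
  // threads in the grid, so any grid size >= 1 block covers all n elements.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    out[i] = 2.0f * in[i];
  }
}

int main() {
  const int n = 1 << 20;
  float *in, *out;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; ++i) in[i] = 1.0f;
  scale2x_kernel<<<4, 256>>>(n, in, out);   // deliberately undersized grid
  cudaDeviceSynchronize();
  printf("out[n-1] = %f\n", out[n - 1]);    // prints 2.0 despite 1024 threads
  cudaFree(in); cudaFree(out);
  return 0;
}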
5ecb96bfecadaf87ab4c6c9ec1c376528b120a63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #define Y1(i,j) Y1[((i)*(A))+(j)] #define Yf(i,j) Yf[((i)*(B1))+(j)] #define Y2(i,j) Y2[((i)*(C))+(j)] #define Z1(i,j) Z1[((i)*(C))+(j)] #define X1(i,j) X1[((i)*(B))+(j)] #define X2(i,j) X2[((i)*(C))+(j)] #define Y(i,j) Y[((i)*(B))+(j)] #define Z(i,j) Z[((i)*(B))+(j)] //#define I(i,j) I[((i)*(A))+(j)] #define foo(a,b) b?tanh(a):exp(a) #define FOOTPRINT_SIZE 64 #define BLOCK_SIZE 32 #define THREADS_PER_BLOCK 32 //for Pointwise calculations void *myCudaMalloc1(size_t len) { void *p; hipMalloc(&p, len); return p; } void displayMatrix2 (const char *label, double *m, int rows, int cols) { printf ("\n%s:\n", label); for(int i = 0; i < rows; ++i ) { for(int j = 0; j < cols; ++j ) printf("%10.5lf\t",m[(i*cols)+j]); printf ("\n"); } } __global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB); //__global__ void MatMulKernel01(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB); __global__ void cuMinus(double *C, double *A, double *B, int n, double delta=1); __global__ void cuGradientFunc(double *A, double *B, long n, long n_cols); __global__ void cuFunc(double *A, double *B, long n, long n_cols, long val); __global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols); __global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n); //---------------------------Helper Host Functions------------------------------------------------------------------------------------------------ void initializeW(double* X1, long A, long B){ /*Initializes the weights*/ long i,j; for (i=0; i<A;i++) for (j=0; j<B;j++) X1(i,j) = ((double)rand() / (double)RAND_MAX) * 0.2 - 0.1; } void initializeI(double* X1, long A, long B){ /*Initializes the inputs*/ long i,j; for (i=0; i<A;i++) for (j=0; j<B;j++) X1(i,j) = j%2; } void initializeO(double* X1, long A, long B){ /*Initializes the outputs*/ long i,j; for (i=0; i<A;i++) for (j=0; j<B;j++) X1(i,j) = i%2; } void mm(double* X2, double* Y, double* Z1, long A, long B, long C){ /*Performs Matrix-Matrix Mulitplication*/ /* long i,j,k; for (i=0; i<A; i++) for (j=0; j<B; j++) for(k=0; k<C; k++) { if(j==0) X2(i,k)=0; X2(i,k) += Y(i,j) * Z1(j,k); } */ dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); bool transA = false, transB = false; int A_width = B; int A_height = A; int B_width = C; int B_height = B; //printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width); int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1); int grid_size_y = transA? ((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1); dim3 dimGrid( grid_size_x, grid_size_y); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, X2,Y,Z1,A_width,A_height,B_width,B_height, transA,transB); } void mmt(double* X1, double* Y2, double* Z1, long A, long B, long C){ /*Performs Matrix-Transposed Matrix Mulitplication*/ /* long i,j,k; for (i=0; i<A; i++) for (j=0; j<B; j++) { X1(i,j)=0; for(k=0; k<C; k++) X1(i,j) += Z1(i,k) * Y2(j,k) ; //Z1(i,k) } */ //printf("%d %d %d\n",A,B,C); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); bool transA = false, transB = true; int A_width = C; int A_height = A; int B_width = C; int B_height = B; //printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width); int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1); int grid_size_y = transA? 
((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1); dim3 dimGrid( grid_size_x, grid_size_y); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, X1,Z1,Y2,A_width,A_height,B_width,B_height, transA,transB); } void mtm(double* X2, double* Y1, double* Z1, long A, long B, long C){ /*Performs Transposed Matrix- Matrix Mulitplication*/ /* long i,j,k; for (i=0; i<A; i++) for (j=0; j<B; j++) for(k=0; k<C; k++) { if(j==0) X2(i,k)=0; X2(i,k) += Y1(j,i) * Z1(j,k); } */ //printf("%d %d %d\n",A,B,C); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); bool transA = true, transB = false; int A_width = A; int A_height = B; int B_width = C; int B_height = B; //printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width); int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1); int grid_size_y = transA? ((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1); dim3 dimGrid( grid_size_x, grid_size_y); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, X2,Y1,Z1,A_width,A_height,B_width,B_height, transA,transB); } void func(double* X1, double* Yf, long A, long B1, long val){ /*Performs a point-wise operation*/ long B=B1+val; /* long i,j; for (i=0; i<A; i++) for (j=0; j<B1; j++) X1(i,(j+val)) = foo(Yf(i,j),val); */ long len = A*B1; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); hipLaunchKernelGGL(( cuFunc), dim3(num_blocks), dim3(block_size), 0, 0, X1, Yf, len, B1, val); } void gradient_func(double* X1, double* Yf, long A, long B){ /*Performs a point-wise operation*/ long B1=B+1; /* long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) X1(i,j) = Yf(i, (j+1))*(1 - pow (tanh (X1(i,j)), 2)); */ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); hipLaunchKernelGGL(( cuGradientFunc), dim3(num_blocks), dim3(block_size), 0, 0, X1, Yf, len, B); } void error(double* X1, double* Y, double* Z, long A, long B){ /*Calculates the Error*/ /* long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) X1(i,j) = Y(i,j)-Z(i,j); */ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); hipLaunchKernelGGL(( cuMinus), dim3(num_blocks), dim3(block_size), 0, 0, X1, Y, Z, len); } void reduction(double* Y, double* X1, long A, long B){ /*Performs the summation of probabilities*/ /*long i,j; for (i=0; i<A; i++) { X1[i]=0; for (j=0; j<B; j++) X1[i] += Y(i,j); }*/ int len = B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); double * HostX = (double * ) malloc(A * sizeof(double)); double *data; double *d_partial_sums; double *global_mem; data = (double * ) myCudaMalloc1(sizeof(double) * len); global_mem = (double * ) myCudaMalloc1( sizeof(double)* block_size); d_partial_sums = (double * ) myCudaMalloc1( sizeof(double)* num_blocks); for(int i = 0; i < A; ++i){ int tmp_block_size = block_size; int tmp_num_blocks = num_blocks; int data_len = len; hipMemcpy(data, Y + i * len, data_len * sizeof(double), hipMemcpyDeviceToDevice); while(true){ hipMemset(global_mem, 0, sizeof(double) * tmp_block_size); hipLaunchKernelGGL(( cu_sum), dim3(tmp_num_blocks), dim3(tmp_block_size), 0, 0, data, d_partial_sums, global_mem, data_len); hipDeviceSynchronize(); data_len = tmp_num_blocks; if(tmp_num_blocks == 1){ // copy the result back to the host double host_res = 0; hipMemcpy(&host_res, d_partial_sums, sizeof(double), hipMemcpyDeviceToHost); HostX[i] = host_res; break; }else if(tmp_num_blocks <= block_size){ /* round the final block size up to a power of two: cu_sum halves blockDim.x each step, so a non-power-of-two block would drop elements from the sum */ tmp_block_size = 1; while(tmp_block_size < data_len) tmp_block_size <<= 1; tmp_num_blocks = 1; hipMemcpy(data, d_partial_sums, data_len * sizeof(double), hipMemcpyDeviceToDevice); }else{ tmp_block_size = THREADS_PER_BLOCK; tmp_num_blocks = (data_len / tmp_block_size) + ((data_len % tmp_block_size) ? 1 : 0); hipMemcpy(data, d_partial_sums, data_len * sizeof(double), hipMemcpyDeviceToDevice); } } } hipMemcpy(X1, HostX, A * sizeof(double), hipMemcpyHostToDevice); //copy back to the device hipFree(global_mem); hipFree(data); hipFree(d_partial_sums); free(HostX); //displayMatrix2("HostX", HostX, A, 1); } void prob(double* Y,double* Z, double* X1, long A, long B){ /*Computes the normalized exponential*/ /*long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) Z(i,j) = Y(i,j)/X1[i];*/ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); hipLaunchKernelGGL(( cuDivideByVec), dim3(num_blocks), dim3(block_size), 0, 0, Z, Y, X1, len, B); } void delta(double* Z, double* Y, long A, long B, double C){ /*Updates the weight matrix*/ /* long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) Z(i,j) -= C*Y(i,j); */ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ?
1 : 0); hipLaunchKernelGGL(( cuMinus), dim3(num_blocks), dim3(block_size), 0, 0, Z, Z, Y, len, C); } //----------------Device kernels--------------------------------- __global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB) { int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; int Row = block_row * BLOCK_SIZE + thread_row, Col = block_col * BLOCK_SIZE + thread_col; int C_width = transB?B_height:B_width; int C_height = transA?A_width:A_height; //if(transB && !block_col && !block_row && !thread_col && !thread_row)printf("C: %d %d\n",C_width, C_height); double Cvalue = 0; /* double tiles and accumulator: the operands are double, so staging them in float would silently discard precision */ for (int m = 0; m < ((transA ? A_height : A_width) - 1) / BLOCK_SIZE + 1; ++m) { /* one iteration per BLOCK_SIZE tile of the shared dimension, in both the transposed and non-transposed case */ __shared__ double shared_A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double shared_B[BLOCK_SIZE][BLOCK_SIZE]; if(transA){ if(BLOCK_SIZE * m + thread_col < A_height && Row < A_width) { shared_A[thread_row][thread_col] = A[(BLOCK_SIZE * m + thread_col) * A_width + Row]; }else{ shared_A[thread_row][thread_col] = 0; } }else{ if(Row < A_height && BLOCK_SIZE * m + thread_col < A_width) { shared_A[thread_row][thread_col] = A[Row * A_width + BLOCK_SIZE * m + thread_col]; }else{ shared_A[thread_row][thread_col] = 0; } } if(transB){ if( Col < B_height && BLOCK_SIZE * m + thread_row < B_width) { shared_B[thread_row][thread_col] = B[ Col * B_width + BLOCK_SIZE * m + thread_row]; } else { shared_B[thread_row][thread_col] = 0; } }else{ if(BLOCK_SIZE * m + thread_row < B_height && Col < B_width ) { shared_B[thread_row][thread_col] = B[ (BLOCK_SIZE * m + thread_row) * B_width + Col]; } else { shared_B[thread_row][thread_col] = 0; } } // Synchronize to ensure all elements are read __syncthreads(); #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col]; __syncthreads(); } if(Row < C_height && Col < C_width) { C[Row * C_width + Col] = Cvalue; } } __global__ void cuMinus(double *C, double *A, double *B, int n, double delta){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ if(delta != 1){ C[tid] = A[tid] - B[tid] * delta; }else{ C[tid] = A[tid] - B[tid]; } tid += stride; } } __global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ C[tid] = A[tid] / B[tid/n_cols]; tid += stride; } } __global__ void cuGradientFunc(double *A, double *B, long n, long n_cols){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ A[tid] = (1 - pow (tanh (A[tid]), 2)) * B[tid+1 + (tid)/n_cols]; tid += stride; } } __global__ void cuFunc(double *A, double *B, long n, long n_cols, long val){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ A[tid+ val*(val+tid/n_cols)] = foo(B[tid],val); tid += stride; } } __global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; /* reduce in per-block __shared__ storage: global_mem is a single buffer visible to every block, so reducing in it races whenever gridDim.x > 1; the parameter is kept so the existing launches compile unchanged */ __shared__ double sdata[THREADS_PER_BLOCK]; (void)global_mem; // load input into __shared__ memory //for(int i = 0 ; i < n; i++)printf("%lf ",src[i]); // printf("\n"); double x = 0; if(tid < n){ x = src[tid]; } sdata[threadIdx.x] = x; __syncthreads(); // contiguous range pattern for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){ if(threadIdx.x < offset){ // add a partial sum upstream to our own sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } // wait until all threads in the block have // updated their partial sums __syncthreads(); } // thread 0 writes the final result if(threadIdx.x == 0){ sum[blockIdx.x] = sdata[0]; } __syncthreads(); }
5ecb96bfecadaf87ab4c6c9ec1c376528b120a63.cu
#include<stdio.h> #define Y1(i,j) Y1[((i)*(A))+(j)] #define Yf(i,j) Yf[((i)*(B1))+(j)] #define Y2(i,j) Y2[((i)*(C))+(j)] #define Z1(i,j) Z1[((i)*(C))+(j)] #define X1(i,j) X1[((i)*(B))+(j)] #define X2(i,j) X2[((i)*(C))+(j)] #define Y(i,j) Y[((i)*(B))+(j)] #define Z(i,j) Z[((i)*(B))+(j)] //#define I(i,j) I[((i)*(A))+(j)] #define foo(a,b) b?tanh(a):exp(a) #define FOOTPRINT_SIZE 64 #define BLOCK_SIZE 32 #define THREADS_PER_BLOCK 32 //for Pointwise calculations void *myCudaMalloc1(size_t len) { void *p; cudaMalloc(&p, len); return p; } void displayMatrix2 (const char *label, double *m, int rows, int cols) { printf ("\n%s:\n", label); for(int i = 0; i < rows; ++i ) { for(int j = 0; j < cols; ++j ) printf("%10.5lf\t",m[(i*cols)+j]); printf ("\n"); } } __global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB); //__global__ void MatMulKernel01(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB); __global__ void cuMinus(double *C, double *A, double *B, int n, double delta=1); __global__ void cuGradientFunc(double *A, double *B, long n, long n_cols); __global__ void cuFunc(double *A, double *B, long n, long n_cols, long val); __global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols); __global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n); //---------------------------Helper Host Functions------------------------------------------------------------------------------------------------ void initializeW(double* X1, long A, long B){ /*Initializes the weights*/ long i,j; for (i=0; i<A;i++) for (j=0; j<B;j++) X1(i,j) = ((double)rand() / (double)RAND_MAX) * 0.2 - 0.1; } void initializeI(double* X1, long A, long B){ /*Initializes the inputs*/ long i,j; for (i=0; i<A;i++) for (j=0; j<B;j++) X1(i,j) = j%2; } void initializeO(double* X1, long A, long B){ /*Initializes the outputs*/ long i,j; for (i=0; i<A;i++) for (j=0; j<B;j++) X1(i,j) = i%2; } void mm(double* X2, double* Y, double* Z1, long A, long B, long C){ /*Performs Matrix-Matrix Mulitplication*/ /* long i,j,k; for (i=0; i<A; i++) for (j=0; j<B; j++) for(k=0; k<C; k++) { if(j==0) X2(i,k)=0; X2(i,k) += Y(i,j) * Z1(j,k); } */ dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); bool transA = false, transB = false; int A_width = B; int A_height = A; int B_width = C; int B_height = B; //printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width); int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1); int grid_size_y = transA? ((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1); dim3 dimGrid( grid_size_x, grid_size_y); MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y,Z1,A_width,A_height,B_width,B_height, transA,transB); } void mmt(double* X1, double* Y2, double* Z1, long A, long B, long C){ /*Performs Matrix-Transposed Matrix Mulitplication*/ /* long i,j,k; for (i=0; i<A; i++) for (j=0; j<B; j++) { X1(i,j)=0; for(k=0; k<C; k++) X1(i,j) += Z1(i,k) * Y2(j,k) ; //Z1(i,k) } */ //printf("%d %d %d\n",A,B,C); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); bool transA = false, transB = true; int A_width = C; int A_height = A; int B_width = C; int B_height = B; //printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width); int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1); int grid_size_y = transA? 
((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1); dim3 dimGrid( grid_size_x, grid_size_y); MatMulKernel<<<dimGrid,dimBlock>>>(X1,Z1,Y2,A_width,A_height,B_width,B_height, transA,transB); } void mtm(double* X2, double* Y1, double* Z1, long A, long B, long C){ /*Performs Transposed Matrix- Matrix Mulitplication*/ /* long i,j,k; for (i=0; i<A; i++) for (j=0; j<B; j++) for(k=0; k<C; k++) { if(j==0) X2(i,k)=0; X2(i,k) += Y1(j,i) * Z1(j,k); } */ //printf("%d %d %d\n",A,B,C); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); bool transA = true, transB = false; int A_width = A; int A_height = B; int B_width = C; int B_height = B; //printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width); int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1); int grid_size_y = transA? ((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1); dim3 dimGrid( grid_size_x, grid_size_y); MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y1,Z1,A_width,A_height,B_width,B_height, transA,transB); } void func(double* X1, double* Yf, long A, long B1, long val){ /*Performs a point-wise operation*/ long B=B1+val; /* long i,j; for (i=0; i<A; i++) for (j=0; j<B1; j++) X1(i,(j+val)) = foo(Yf(i,j),val); */ long len = A*B1; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); cuFunc<<<num_blocks, block_size>>>(X1, Yf, len, B1, val); } void gradient_func(double* X1, double* Yf, long A, long B){ /*Performs a point-wise operation*/ long B1=B+1; /* long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) X1(i,j) = Yf(i, (j+1))*(1 - pow (tanh (X1(i,j)), 2)); */ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); cuGradientFunc<<<num_blocks, block_size>>>(X1, Yf, len, B); } void error(double* X1, double* Y, double* Z, long A, long B){ /*Calculates the Error*/ /* long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) X1(i,j) = Y(i,j)-Z(i,j); */ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); cuMinus<<<num_blocks, block_size>>>(X1, Y, Z, len); } void reduction(double* Y, double* X1, long A, long B){ /*Performs the summation of probabilities*/ /*long i,j; for (i=0; i<A; i++) { X1[i]=0; for (j=0; j<B; j++) X1[i] += Y(i,j); }*/ int len = B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); double * HostX = (double * ) malloc(A * sizeof(double)); double *data; double *d_partial_sums; double *global_mem; data = (double * ) myCudaMalloc1(sizeof(double) * len); global_mem = (double * ) myCudaMalloc1( sizeof(double)* block_size); d_partial_sums = (double * ) myCudaMalloc1( sizeof(double)* num_blocks); for(int i = 0; i < A; ++i){ int tmp_block_size = block_size; int tmp_num_blocks = num_blocks; int data_len = len; cudaMemcpy(data, Y + i * len, data_len * sizeof(double), cudaMemcpyDeviceToDevice); while(true){ cudaMemset(global_mem, 0, sizeof(double) * tmp_block_size); cu_sum<<<tmp_num_blocks, tmp_block_size>>>(data, d_partial_sums, global_mem, data_len); cudaDeviceSynchronize(); data_len = tmp_num_blocks; if(tmp_num_blocks == 1){ // copy the result back to the host double host_res = 0; cudaMemcpy(&host_res, d_partial_sums, sizeof(double), cudaMemcpyDeviceToHost); HostX[i] = host_res; break; }else if(tmp_num_blocks <= block_size){ /* round the final block size up to a power of two: cu_sum halves blockDim.x each step, so a non-power-of-two block would drop elements from the sum */ tmp_block_size = 1; while(tmp_block_size < data_len) tmp_block_size <<= 1; tmp_num_blocks = 1; cudaMemcpy(data, d_partial_sums, data_len * sizeof(double), cudaMemcpyDeviceToDevice); }else{ tmp_block_size = THREADS_PER_BLOCK; tmp_num_blocks = (data_len / tmp_block_size) + ((data_len % tmp_block_size) ? 1 : 0); cudaMemcpy(data, d_partial_sums, data_len * sizeof(double), cudaMemcpyDeviceToDevice); } } } cudaMemcpy(X1, HostX, A * sizeof(double), cudaMemcpyHostToDevice); //copy back to the device cudaFree(global_mem); cudaFree(data); cudaFree(d_partial_sums); free(HostX); //displayMatrix2("HostX", HostX, A, 1); } void prob(double* Y,double* Z, double* X1, long A, long B){ /*Computes the normalized exponential*/ /*long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) Z(i,j) = Y(i,j)/X1[i];*/ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); cuDivideByVec<<<num_blocks, block_size>>>(Z, Y, X1, len, B); } void delta(double* Z, double* Y, long A, long B, double C){ /*Updates the weight matrix*/ /* long i,j; for (i=0; i<A; i++) for (j=0; j<B; j++) Z(i,j) -= C*Y(i,j); */ long len = A*B; const size_t block_size = THREADS_PER_BLOCK; const size_t num_blocks = (len / block_size) + ((len % block_size) ?
1 : 0); cuMinus<<<num_blocks, block_size>>>(Z, Z, Y, len, C); } //----------------Device kernels--------------------------------- __global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB) { int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; int Row = block_row * BLOCK_SIZE + thread_row, Col = block_col * BLOCK_SIZE + thread_col; int C_width = transB?B_height:B_width; int C_height = transA?A_width:A_height; //if(transB && !block_col && !block_row && !thread_col && !thread_row)printf("C: %d %d\n",C_width, C_height); double Cvalue = 0; /* double tiles and accumulator: the operands are double, so staging them in float would silently discard precision */ for (int m = 0; m < ((transA ? A_height : A_width) - 1) / BLOCK_SIZE + 1; ++m) { /* one iteration per BLOCK_SIZE tile of the shared dimension, in both the transposed and non-transposed case */ __shared__ double shared_A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double shared_B[BLOCK_SIZE][BLOCK_SIZE]; if(transA){ if(BLOCK_SIZE * m + thread_col < A_height && Row < A_width) { shared_A[thread_row][thread_col] = A[(BLOCK_SIZE * m + thread_col) * A_width + Row]; }else{ shared_A[thread_row][thread_col] = 0; } }else{ if(Row < A_height && BLOCK_SIZE * m + thread_col < A_width) { shared_A[thread_row][thread_col] = A[Row * A_width + BLOCK_SIZE * m + thread_col]; }else{ shared_A[thread_row][thread_col] = 0; } } if(transB){ if( Col < B_height && BLOCK_SIZE * m + thread_row < B_width) { shared_B[thread_row][thread_col] = B[ Col * B_width + BLOCK_SIZE * m + thread_row]; } else { shared_B[thread_row][thread_col] = 0; } }else{ if(BLOCK_SIZE * m + thread_row < B_height && Col < B_width ) { shared_B[thread_row][thread_col] = B[ (BLOCK_SIZE * m + thread_row) * B_width + Col]; } else { shared_B[thread_row][thread_col] = 0; } } // Synchronize to ensure all elements are read __syncthreads(); #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col]; __syncthreads(); } if(Row < C_height && Col < C_width) { C[Row * C_width + Col] = Cvalue; } } __global__ void cuMinus(double *C, double *A, double *B, int n, double delta){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ if(delta != 1){ C[tid] = A[tid] - B[tid] * delta; }else{ C[tid] = A[tid] - B[tid]; } tid += stride; } } __global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ C[tid] = A[tid] / B[tid/n_cols]; tid += stride; } } __global__ void cuGradientFunc(double *A, double *B, long n, long n_cols){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ A[tid] = (1 - pow (tanh (A[tid]), 2)) * B[tid+1 + (tid)/n_cols]; tid += stride; } } __global__ void cuFunc(double *A, double *B, long n, long n_cols, long val){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ A[tid+ val*(val+tid/n_cols)] = foo(B[tid],val); tid += stride; } } __global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; /* reduce in per-block __shared__ storage: global_mem is a single buffer visible to every block, so reducing in it races whenever gridDim.x > 1; the parameter is kept so the existing launches compile unchanged */ __shared__ double sdata[THREADS_PER_BLOCK]; (void)global_mem; // load input into __shared__ memory //for(int i = 0 ; i < n; i++)printf("%lf ",src[i]); // printf("\n"); double x = 0; if(tid < n){ x = src[tid]; } sdata[threadIdx.x] = x; __syncthreads(); // contiguous range pattern for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){ if(threadIdx.x < offset){ // add a partial sum upstream to our own sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } // wait until all threads in the block have // updated their partial sums __syncthreads(); } // thread 0 writes the final result if(threadIdx.x == 0){ sum[blockIdx.x] = sdata[0]; } __syncthreads(); }
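A smoke-test sketch for the host helpers above (hypothetical driver, not part of either dataset row; error checking omitted). It pushes two small row-major matrices through mm() and reads back C = A*B, which is {4, 5; 10, 11} for these inputs:

#include <cstdio>
#include <cuda_runtime.h>

void mm(double* X2, double* Y, double* Z1, long A, long B, long C);  // defined above

int main() {
    const double hA[6] = {1, 2, 3, 4, 5, 6};   // 2x3, row-major
    const double hB[6] = {1, 0, 0, 1, 1, 1};   // 3x2, row-major
    double *dA, *dB, *dC, hC[4];
    cudaMalloc(&dA, sizeof(hA)); cudaMalloc(&dB, sizeof(hB)); cudaMalloc(&dC, sizeof(hC));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
    mm(dC, dA, dB, 2, 3, 2);                   // C(2x2) = A(2x3) * B(3x2)
    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);  // syncs on the launch
    printf("%g %g\n%g %g\n", hC[0], hC[1], hC[2], hC[3]);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}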
2c4d3971bf21e33a73b6b1dab6e0263771b1e5c2.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/types.h> #include <hip/hip_runtime.h> #include <vector> #include <cmath> namespace { template <typename scalar_t> __global__ void init_lookupTable_cuda_kernel( torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> lookTable, const torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> epp, const int w, const int h, const int h2, const int searchRange, const float max_depth, const float verRange, const float horRange) { const int linear_index_pos = blockIdx.x * blockDim.x + threadIdx.x; int cw; int ch; int xx; int yy; int count; float projx; float projy; float ratio; float curLen; for(int i = linear_index_pos; i < h2 * w; i = i + blockDim.x * gridDim.x){ ch = i / w; cw = i - ch * w; /* derive the pixel from the loop variable i rather than the fixed linear_index_pos, so grid-stride iterations visit distinct pixels when the grid is smaller than the image */ count = 0; curLen = (epp[0] - cw) * (epp[0] - cw) + (epp[1] - ch) * (epp[1] - ch); for (int sxx = -searchRange; sxx <= searchRange; sxx++){ for (int syy = -searchRange; syy <= searchRange; syy++){ xx = sxx + cw; yy = syy + ch; if((xx > 0) && (yy > 0) && (xx < w) && (yy < h2) && (count < max_depth)){ if (!((sxx == 0) && (syy == 0))){ ratio = ((epp[0] - cw) * sxx + (epp[1] - ch) * syy) / curLen; projx = ratio * (epp[0] - cw); projy = ratio * (epp[1] - ch); if((sqrt(projx * projx + projy * projy) < verRange) && (sqrt((sxx - projx) * (sxx - projx) + (syy - projy) * (syy - projy)) < horRange)){ if (((projx * (epp[0] - cw) + projy * (epp[1] - ch)) > 0) && ((projx * projx + projy * projy) < curLen)){ lookTable[ch][cw][count + 1][0] = xx; lookTable[ch][cw][count + 1][1] = yy; count = count + 1; } } } } } } lookTable[ch][cw][0][0] = count; } } template <typename scalar_t> __global__ void lidar_denoise_cuda_kernel( torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> nvelo_projected_img, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> lookTable, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> noocc_mask, torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> epp, const int w, const int h2, const float mind1d2, const float maxd2) { const int linear_index_pos = blockIdx.x * blockDim.x + threadIdx.x; int cw; int ch; int xx; int yy; float distance1; float distance2; float refx; float refy; float lrefx; float lrefy; for(int i = linear_index_pos; i < h2 * w; i = i + blockDim.x * gridDim.x){ ch = i / w; cw = i - ch * w; /* index from the loop variable i, as above */ // if(nvelo_projected_img[ch][cw][2] > 0){ if(noocc_mask[ch][cw] > 0){ refx = nvelo_projected_img[ch][cw][3]; refy = nvelo_projected_img[ch][cw][4]; lrefx = nvelo_projected_img[ch][cw][0]; lrefy = nvelo_projected_img[ch][cw][1]; for(int j = 0; j < lookTable[ch][cw][0][0]; j++){ if (noocc_mask[ch][cw] < 0.9){ break; } xx = lookTable[ch][cw][j+1][0]; yy = lookTable[ch][cw][j+1][1]; distance2 =((nvelo_projected_img[yy][xx][3] - refx) * (epp[0] - refx) + (nvelo_projected_img[yy][xx][4] - refy) * (epp[1] - refy)) / sqrt((epp[0] - refx)*(epp[0] - refx) + (epp[1] - refy)*(epp[1] - refy)); distance1 =((nvelo_projected_img[yy][xx][0] - lrefx) * (epp[0] - lrefx) + (nvelo_projected_img[yy][xx][1] - lrefy) * (epp[1] - lrefy)) / sqrt((epp[0] - lrefx)*(epp[0] - lrefx) + (epp[1] - lrefy)*(epp[1] - lrefy)); if((distance1 > 0) && (distance2 < 0) && ((distance1 - distance2) > mind1d2) && (abs(distance2) < maxd2)){ // if((distance1 > 0) && (distance2 < 0)){ noocc_mask[ch][cw] = 0; } //if(nvelo_projected_img[yy][xx][2] > 0.1){ // mul1 = (nvelo_projected_img[yy][xx][3] - refx) * (epp[0] - refx) + (nvelo_projected_img[yy][xx][4] - refy) * (epp[1] - refy); // mul2 = (nvelo_projected_img[yy][xx][3] - epp[0]) * (refx - epp[0]) + (nvelo_projected_img[yy][xx][4] - epp[1]) * (refy - epp[1]); // if ((mul1 < 0) || (mul2 < 0)){ // mul1 = (nvelo_projected_img[yy][xx][0] - nvelo_projected_img[ch][cw][0]) * (epp[0] - nvelo_projected_img[ch][cw][0]) + (nvelo_projected_img[yy][xx][1] - nvelo_projected_img[ch][cw][1]) * (epp[1] - nvelo_projected_img[ch][cw][1]); // mul2 = (nvelo_projected_img[yy][xx][0] - epp[0]) * (nvelo_projected_img[ch][cw][0] - epp[0]) + (nvelo_projected_img[yy][xx][1] - epp[1]) * (nvelo_projected_img[ch][cw][1] - epp[1]); // if ((mul1 > 0) && (mul2 > 0)){ // noocc_mask[ch][cw] = 0; // } // } //} } } } } } // namespace std::vector<torch::Tensor> init_lookupTable_cuda( torch::Tensor lookTable, torch::Tensor epp, int w, int h, int h2, int searchRange, float max_depth, float verRange, float horRange ) { const int threads = 1024; const dim3 blocks(((h2 * w) + threads - 1) / threads); /* 1-D grid: the kernels only read blockIdx.x, so additional grid dimensions would just rerun the same pixels */ AT_DISPATCH_FLOATING_TYPES(lookTable.type(), "initialize look up table", ([&] { hipLaunchKernelGGL(( init_lookupTable_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, lookTable.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), epp.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), w, h, h2, searchRange, max_depth, verRange, horRange ); })); return {lookTable}; } std::vector<torch::Tensor> lidar_denoise_cuda( torch::Tensor nvelo_projected_img, torch::Tensor lookTable, torch::Tensor noocc_mask, torch::Tensor epp, int w, int h2, float mind1d2, float maxd2 ) { const int threads = 1024; const dim3 blocks(((h2 * w) + threads - 1) / threads); /* 1-D grid, as above */ AT_DISPATCH_FLOATING_TYPES(nvelo_projected_img.type(), "denoise lidar scan", ([&] { hipLaunchKernelGGL(( lidar_denoise_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, nvelo_projected_img.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), lookTable.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), noocc_mask.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), epp.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), w, h2, mind1d2, maxd2 ); })); return {noocc_mask}; }
2c4d3971bf21e33a73b6b1dab6e0263771b1e5c2.cu
#include <torch/types.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <cmath> namespace { template <typename scalar_t> __global__ void init_lookupTable_cuda_kernel( torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> lookTable, const torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> epp, const int w, const int h, const int h2, const int searchRange, const float max_depth, const float verRange, const float horRange) { const int linear_index_pos = blockIdx.x * blockDim.x + threadIdx.x; int cw; int ch; int xx; int yy; int count; float projx; float projy; float ratio; float curLen; for(int i = linear_index_pos; i < h2 * w; i = i + blockDim.x * gridDim.x){ ch = i / w; cw = i - ch * w; /* derive the pixel from the loop variable i rather than the fixed linear_index_pos, so grid-stride iterations visit distinct pixels when the grid is smaller than the image */ count = 0; curLen = (epp[0] - cw) * (epp[0] - cw) + (epp[1] - ch) * (epp[1] - ch); for (int sxx = -searchRange; sxx <= searchRange; sxx++){ for (int syy = -searchRange; syy <= searchRange; syy++){ xx = sxx + cw; yy = syy + ch; if((xx > 0) && (yy > 0) && (xx < w) && (yy < h2) && (count < max_depth)){ if (!((sxx == 0) && (syy == 0))){ ratio = ((epp[0] - cw) * sxx + (epp[1] - ch) * syy) / curLen; projx = ratio * (epp[0] - cw); projy = ratio * (epp[1] - ch); if((sqrt(projx * projx + projy * projy) < verRange) && (sqrt((sxx - projx) * (sxx - projx) + (syy - projy) * (syy - projy)) < horRange)){ if (((projx * (epp[0] - cw) + projy * (epp[1] - ch)) > 0) && ((projx * projx + projy * projy) < curLen)){ lookTable[ch][cw][count + 1][0] = xx; lookTable[ch][cw][count + 1][1] = yy; count = count + 1; } } } } } } lookTable[ch][cw][0][0] = count; } } template <typename scalar_t> __global__ void lidar_denoise_cuda_kernel( torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> nvelo_projected_img, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> lookTable, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> noocc_mask, torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> epp, const int w, const int h2, const float mind1d2, const float maxd2) { const int linear_index_pos = blockIdx.x * blockDim.x + threadIdx.x; int cw; int ch; int xx; int yy; float distance1; float distance2; float refx; float refy; float lrefx; float lrefy; for(int i = linear_index_pos; i < h2 * w; i = i + blockDim.x * gridDim.x){ ch = i / w; cw = i - ch * w; /* index from the loop variable i, as above */ // if(nvelo_projected_img[ch][cw][2] > 0){ if(noocc_mask[ch][cw] > 0){ refx = nvelo_projected_img[ch][cw][3]; refy = nvelo_projected_img[ch][cw][4]; lrefx = nvelo_projected_img[ch][cw][0]; lrefy = nvelo_projected_img[ch][cw][1]; for(int j = 0; j < lookTable[ch][cw][0][0]; j++){ if (noocc_mask[ch][cw] < 0.9){ break; } xx = lookTable[ch][cw][j+1][0]; yy = lookTable[ch][cw][j+1][1]; distance2 =((nvelo_projected_img[yy][xx][3] - refx) * (epp[0] - refx) + (nvelo_projected_img[yy][xx][4] - refy) * (epp[1] - refy)) / sqrt((epp[0] - refx)*(epp[0] - refx) + (epp[1] - refy)*(epp[1] - refy)); distance1 =((nvelo_projected_img[yy][xx][0] - lrefx) * (epp[0] - lrefx) + (nvelo_projected_img[yy][xx][1] - lrefy) * (epp[1] - lrefy)) / sqrt((epp[0] - lrefx)*(epp[0] - lrefx) + (epp[1] - lrefy)*(epp[1] - lrefy)); if((distance1 > 0) && (distance2 < 0) && ((distance1 - distance2) > mind1d2) && (abs(distance2) < maxd2)){ // if((distance1 > 0) && (distance2 < 0)){ noocc_mask[ch][cw] = 0; } //if(nvelo_projected_img[yy][xx][2] > 0.1){ // mul1 = (nvelo_projected_img[yy][xx][3] - refx) * (epp[0] - refx) + (nvelo_projected_img[yy][xx][4] - refy) * (epp[1] - refy); // mul2 = (nvelo_projected_img[yy][xx][3] - epp[0]) * (refx - epp[0]) + (nvelo_projected_img[yy][xx][4] - epp[1]) * (refy - epp[1]); // if ((mul1 < 0) || (mul2 < 0)){ // mul1 = (nvelo_projected_img[yy][xx][0] - nvelo_projected_img[ch][cw][0]) * (epp[0] - nvelo_projected_img[ch][cw][0]) + (nvelo_projected_img[yy][xx][1] - nvelo_projected_img[ch][cw][1]) * (epp[1] - nvelo_projected_img[ch][cw][1]); // mul2 = (nvelo_projected_img[yy][xx][0] - epp[0]) * (nvelo_projected_img[ch][cw][0] - epp[0]) + (nvelo_projected_img[yy][xx][1] - epp[1]) * (nvelo_projected_img[ch][cw][1] - epp[1]); // if ((mul1 > 0) && (mul2 > 0)){ // noocc_mask[ch][cw] = 0; // } // } //} } } } } } // namespace std::vector<torch::Tensor> init_lookupTable_cuda( torch::Tensor lookTable, torch::Tensor epp, int w, int h, int h2, int searchRange, float max_depth, float verRange, float horRange ) { const int threads = 1024; const dim3 blocks(((h2 * w) + threads - 1) / threads); /* 1-D grid: the kernels only read blockIdx.x, so additional grid dimensions would just rerun the same pixels */ AT_DISPATCH_FLOATING_TYPES(lookTable.type(), "initialize look up table", ([&] { init_lookupTable_cuda_kernel<scalar_t><<<blocks, threads>>>( lookTable.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), epp.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), w, h, h2, searchRange, max_depth, verRange, horRange ); })); return {lookTable}; } std::vector<torch::Tensor> lidar_denoise_cuda( torch::Tensor nvelo_projected_img, torch::Tensor lookTable, torch::Tensor noocc_mask, torch::Tensor epp, int w, int h2, float mind1d2, float maxd2 ) { const int threads = 1024; const dim3 blocks(((h2 * w) + threads - 1) / threads); /* 1-D grid, as above */ AT_DISPATCH_FLOATING_TYPES(nvelo_projected_img.type(), "denoise lidar scan", ([&] { lidar_denoise_cuda_kernel<scalar_t><<<blocks, threads>>>( nvelo_projected_img.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), lookTable.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), noocc_mask.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(), epp.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), w, h2, mind1d2, maxd2 ); })); return {noocc_mask}; }
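For reference, the ceil-division sizing that pairs with grid-stride kernels like the two above, shown standalone (hypothetical helper and example numbers, not from the extension):

#include <cstdio>

// One block per 'threads' pixels; each kernel thread then grid-strides
// over i in [global_id, h2*w).
inline unsigned grid_1d(long total, unsigned threads) {
    return (unsigned)((total + threads - 1) / threads);
}

int main() {
    const long h2 = 192, w = 640;                    // example image extent
    printf("blocks = %u\n", grid_1d(h2 * w, 1024));  // 122880/1024 -> 120
    return 0;
}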
67ff23c5ff5565fe18fb5172a6446e719c5a7296.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // Modified by Wei Liu // ------------------------------------------------------------------ #include <vector> #include "caffe/layers/smooth_L1_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_ROCM namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = diff_.count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe #endif //USE_ROCM
67ff23c5ff5565fe18fb5172a6446e719c5a7296.cu
// ------------------------------------------------------------------ // Fast R-CNN // copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // Modified by Wei Liu // ------------------------------------------------------------------ #include <vector> #include "caffe/layers/smooth_L1_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_CUDA namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } // NOLINT_NEXT_LINE(whitespace/operators) SmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = diff_.count(); // NOLINT_NEXT_LINE(whitespace/operators) SmoothL1Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe #endif //USE_CUDA
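A host-side oracle sketch for the two kernels above (hypothetical, not part of Caffe). It mirrors the piecewise forward f(x) = 0.5*x^2 for |x| < 1 and |x| - 0.5 otherwise, plus the derivative the backward kernel applies, which is convenient when checking GPU output against the CPU:

#include <cmath>
#include <cstdio>

// Forward value: quadratic near zero, linear beyond |x| >= 1.
float smooth_l1(float x)      { float a = std::fabs(x); return a < 1.f ? 0.5f * x * x : a - 0.5f; }
// Derivative: x near zero, sign(x) beyond, matching (0 < val) - (val < 0).
float smooth_l1_grad(float x) { return std::fabs(x) < 1.f ? x : (x > 0.f ? 1.f : -1.f); }

int main() {
    const float xs[] = {-2.f, -0.5f, 0.f, 0.5f, 2.f};
    for (float x : xs)
        printf("x=% .1f  f=%.3f  f'=% .2f\n", x, smooth_l1(x), smooth_l1_grad(x));
    return 0;
}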
269a479eaf78cf81b4caf382c690b16f3ad8da06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // clang-format off {% set wdesc = "weighted" if weighted else "unweighted" %} #include "fbgemm_gpu/embedding_backward_template_helpers.cuh" #include "fbgemm_gpu/split_embeddings_utils.cuh" {% if not dense %} constexpr int32_t kCacheLocationMissing = -1; {% endif %} constexpr size_t kBackwardMaxThreads = 512; using Tensor = at::Tensor; using namespace fbgemm_gpu; __global__ __launch_bounds__(kMaxThreads) void split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_find_long_segments( const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_num_runs, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_run_lengths, at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> long_run_ids, at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> num_long_run_ids, int32_t max_segment_length_per_warp) { const int32_t num_runs = sorted_linear_indices_num_runs[0]; for (auto run_id = blockIdx.x * blockDim.x + threadIdx.x; run_id < num_runs; run_id += blockDim.x * gridDim.x) { if (sorted_linear_indices_run_lengths[run_id] >= max_segment_length_per_warp) { auto long_run_idx = gpuAtomicIncrement(&num_long_run_ids[0]); long_run_ids[long_run_idx] = run_id; } } } template <typename grad_t> __global__ __launch_bounds__(kMaxThreads) void grad_mean_kernel( const at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> offsets, at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output_mean) { int32_t B = grad_output.size(0); int32_t T = D_offsets.size(0) - 1; int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b = b_t % B; int32_t t = b_t / B; if (b_t >= B * T) { return; } int32_t D_start = D_offsets[t]; int32_t D_end = D_offsets[t + 1]; int32_t D = D_end - D_start; int64_t indices_start = offsets[t * B + b]; int64_t indices_end = offsets[t * B + b + 1]; int32_t L = indices_end - indices_start; if (L != 0) { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<grad_t> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.mul_(1.0 / L); grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } else { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<grad_t> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } } {% for nobag in [True, False] %} {% if not nobag or not weighted %} template < typename emb_t, typename grad_t, typename cache_t, size_t kMaxVecsPerThread> __global__ __launch_bounds__(kMaxThreads) void split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1( const at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output, at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights, {% if not dense %} at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights, at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> weights_placements, {% endif %} const 
at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, {% else %} int32_t B, int64_t D, {% endif %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> hash_size_cumsum, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_linear_indices_run, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> long_run_ids, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> num_long_run_ids, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos, {% else %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos, {% endif %} {% if not dense %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights, {% endif %} {% if not dense %} bool stochastic_rounding, at::PhiloxCudaState stochastic_rounding_philox_args, {% else %} at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights, {% endif %} {% if not nobag %} FixedDivisor fd, {% endif %} {{ args.split_kernel_args | join(", ") }}) { {% if not nobag %} int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); {% else %} int32_t T = weights_offsets.size(0); {% endif %} const int32_t num_long_runs = num_long_run_ids[0]; for (int32_t long_run_id = blockIdx.x; long_run_id < num_long_runs; long_run_id += gridDim.x) { int32_t current_run_id = long_run_ids[long_run_id]; const int64_t linear_index = sorted_linear_indices_run[current_run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[current_run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[current_run_id + 1]; const int32_t SL = segment_end - segment_start; const int32_t warp_id = threadIdx.y; const int32_t lane_id = threadIdx.x; // Note that with shared embedding tables we can have multiple tables // (i.e. different values of `t` sharing the same segment). // const auto info_0 = sorted_infos[segment_start]; {% if not nobag %} int32_t t_0 = fd.Div(info_0); //info_0 / B; {% else %} int32_t t_0 = info_0 % T; {% endif %} int64_t hash_size = hash_size_cumsum[t_0]; {% if not nobag %} int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; {% endif %} int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = SL_per_warp * warp_id; const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL); Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; {% if not nobag %} int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0; {% else %} int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t l = l_t / T; {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? 
sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { {% if not nobag %} int32_t b_j = shfl_sync(b, j); int32_t D_start_j = shfl_sync(D_start, j); {% else %} int32_t l_j = shfl_sync(l, j); {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight_j = shfl_sync(idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; {% if not nobag %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% else %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]); {% endif %} {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].add_(grad_out_vec); {% endif %} } } } // do shared memory reduction only if we used multiple blocks. if (SL > SL_per_warp) { struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> smem; Vec4T<at::acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer(); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = grad_sum[i]; } __syncthreads(); if (blockDim.y >= 32) { if (warp_id < 16) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 16) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 16) { if (warp_id < 8) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 8) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 8) { if (warp_id < 4) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 4) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 4) { if (warp_id < 2) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 2) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (warp_id == 0) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { grad_sum[i] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 1) * kMaxVecsPerThread * 
kWarpSize]); } } } if (warp_id == 0) { int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]); if (weights_placement == PlacementType::DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == PlacementType::MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} at::acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]); int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == PlacementType::DEVICE) { {{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer; Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + current_run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[lane_id + i * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate qparams from updated weight row qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(shared_weight_update_row, D); weight_row_template.store_qparams(qparams_new); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[lane_id + i * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } } } template < typename emb_t, typename grad_t, typename 
cache_t, size_t kMaxVecsPerThread> __global__ __launch_bounds__(kBackwardMaxThreads) void split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1( const at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output, at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights, {% if not dense %} at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights, at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> weights_placements, {% endif %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, {% else %} int32_t B, int64_t D, {% endif %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> hash_size_cumsum, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_linear_indices_run, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos, {% else %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos, {% endif %} {% if not dense %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights, {% endif %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_num_runs, int32_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, at::PhiloxCudaState stochastic_rounding_philox_args, {% else %} at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights, {% endif %} {% if not nobag %} FixedDivisor fd, {% endif %} {{ args.split_kernel_args | join(", ") }}) { {% if not nobag %} int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); {% else %} int32_t T = weights_offsets.size(0); {% endif %} const int32_t run_id = blockIdx.x * blockDim.y + threadIdx.y; if (run_id >= sorted_linear_indices_run.size(0)) { return; } if (run_id >= sorted_linear_indices_num_runs[0]) { return; } const int64_t linear_index = sorted_linear_indices_run[run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[run_id + 1]; const int32_t SL = segment_end - segment_start; if (SL >= max_segment_length_per_warp) { return; } // now, each segment corresponds to exactly one table `t` and row in // that table (`idx`). Thus, we can hoist out some of the book-keeping. const auto info_0 = sorted_infos[segment_start]; {% if not nobag %} int32_t t_0 = fd.Div(info_0); // info_0 / B; {% else %} int32_t t_0 = info_0 % T; {% endif %} int64_t hash_size = hash_size_cumsum[t_0]; {% if not nobag %} int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; {% endif %} int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = 0; const int32_t sl_end = SL; Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; {% if not nobag %} int32_t b_t = sl_j < sl_end ? 
sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = D_offsets[t]; {% else %} int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t l = l_t / T; {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { {% if not nobag %} int32_t b_j = shfl_sync(b, j); int32_t D_start_j = shfl_sync(D_start, j); {% else %} int32_t l_j = shfl_sync(l, j); {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight_j = shfl_sync(idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; {% if not nobag %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% else %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]); {% endif %} {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].add_(grad_out_vec); {% endif %} } } } int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]); if (weights_placement == PlacementType::DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == PlacementType::MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} at::acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]); int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == PlacementType::DEVICE) { {{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer; Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. 
auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights){ qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate new qparams after row update qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kWarpSize], D); weight_row_template.store_qparams(qparams_new); // fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } {{ "void" if not dense else "Tensor" }} split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_exact_cuda( Tensor grad_output, Tensor dev_weights, {% if not dense %} Tensor uvm_weights, Tensor lxu_cache_weights, Tensor weights_placements, {% endif %} Tensor weights_offsets, {% if not nobag %} Tensor D_offsets, int64_t max_D, {% else %} int64_t D, {% endif %} Tensor hash_size_cumsum, int64_t total_hash_size_bits, Tensor indices, Tensor offsets, {% if not nobag %} int64_t pooling_mode, {% endif %} {% if weighted %} Tensor indice_weights, {% endif %} {% if not dense %} Tensor lxu_cache_locations, {% endif %} int64_t unused_, int64_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, {% endif %} {{ args.split_function_args | join(", ") }}) { TENSOR_ON_CUDA_GPU(grad_output); TENSOR_ON_CUDA_GPU(dev_weights); {% if not dense %} TENSOR_ON_CUDA_GPU(uvm_weights); TENSOR_ON_CUDA_GPU(lxu_cache_weights); TENSOR_ON_CUDA_GPU(weights_placements); {% endif %} TENSOR_ON_CUDA_GPU(weights_offsets); {% if not nobag %} TENSOR_ON_CUDA_GPU(D_offsets); {% endif %} TENSOR_ON_CUDA_GPU(hash_size_cumsum); TENSOR_ON_CUDA_GPU(indices); TENSOR_ON_CUDA_GPU(offsets); {% if weighted %} TENSOR_ON_CUDA_GPU(indice_weights); {% endif %} {% if not dense %} TENSOR_ON_CUDA_GPU(lxu_cache_locations); {% endif %} at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(dev_weights.get_device()); {% if 
dense %} auto grad_dev_weights = zeros_like(dev_weights); {% endif %} // short-circuit if there are zero indices. if (indices.numel() == 0) { return {{ "grad_dev_weights" if dense else "" }}; } {% if not nobag %} int32_t T = D_offsets.numel() - 1; {% else %} int32_t T = weights_offsets.numel(); {% endif %} TORCH_CHECK(T > 0); // offsets = [B x T + 1] const auto B = (offsets.size(0) - 1) / T; TORCH_CHECK(B > 0); auto BT_block_size = kMaxThreads / kWarpSize; TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads); {% if not nobag %} TORCH_CHECK(max_D <= {{ max_embedding_dim }}); {% else %} TORCH_CHECK(D <= {{ max_embedding_dim }}); {% endif %} // V100: 96 KB; A100: 160 KB. int max_shared_bytes = 0; #ifndef __HIP_PLATFORM_HCC__ hipDeviceGetAttribute(&max_shared_bytes, hipDeviceAttributeSharedMemPerBlockOptin, dev_weights.get_device()); #else // MI100 has 64 KB local memory (shared memory) per workgroup max_shared_bytes = 64 << 10; #endif C10_HIP_KERNEL_LAUNCH_CHECK(); int shared_kb = max_shared_bytes >> 10; // V100: 64 KB; A100: 96 KB. #ifndef __HIP_PLATFORM_HCC__ // Use 2/3 of the available GPU shared mem; leave rooms for L1$. int used_shared_kb = round_down(shared_kb * 2 / 3, 16); TORCH_CHECK(used_shared_kb > 0); #else // MI100 has independent shared mem and L1 int used_shared_kb = shared_kb; #endif int used_shared_bytes = used_shared_kb << 10; Tensor linear_indices, linear_indices_sorted; Tensor infos_sorted; Tensor sorted_linear_indices_run, sorted_linear_indices_run_lengths, sorted_linear_indices_num_runs, sorted_linear_indices_cumulative_run_lengths; std::tie( linear_indices, linear_indices_sorted, infos_sorted, sorted_linear_indices_run, sorted_linear_indices_run_lengths, sorted_linear_indices_num_runs, sorted_linear_indices_cumulative_run_lengths) = transpose_embedding_input( hash_size_cumsum, total_hash_size_bits, indices, offsets, {{"true" if nobag else "false"}}); {% if not dense %} auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations); if (lxu_cache_locations.size(0) > 0) { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(radix_sort_pairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(at::kByte)); AT_CUDA_CHECK(radix_sort_pairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } {% endif %} {% if not dense %} DISPATCH_EMB_GRAD_CACHE_TYPES( dev_weights.scalar_type(), grad_output.scalar_type(), lxu_cache_weights.scalar_type(), {% else %} AT_DISPATCH_FLOATING_TYPES_AND_HALF( dev_weights.scalar_type(), {% endif %} "split_embedding_backward_{{ optimizer }}_exact_kernel", [&] { {% if weighted %} auto indice_weights_sorted = at::empty_like(indice_weights); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(radix_sort_pairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<at::acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<cache_t, 
true>>(), {% else %} indice_weights.data_ptr<at::acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(at::kByte)); AT_CUDA_CHECK(radix_sort_pairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<at::acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<cache_t, true>>(), {% else %} indice_weights.data_ptr<at::acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } {% endif %} // early memory release linear_indices.reset(); linear_indices_sorted.reset(); auto grad_output_accessor = grad_output.packed_accessor32< {{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}, 2, at::RestrictPtrTraits>(); {% if not nobag %} Tensor grad_output_mean; if (static_cast<PoolingMode>(pooling_mode) == PoolingMode::MEAN) { grad_output_mean = at::empty_like(grad_output); hipLaunchKernelGGL(( grad_mean_kernel<{{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}>) , dim3(div_round_up((B * T), kMaxThreads / kWarpSize)), dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_accessor, D_offsets .packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), offsets .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), grad_output_mean.packed_accessor32< {{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}, 2, at::RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); grad_output_accessor = grad_output_mean.packed_accessor32< {{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}, 2, at::RestrictPtrTraits>(); } {% endif %} {% if not dense %} at::PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } {% endif %} {% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %} {% if not nobag %} if (max_D <= {{ 128 * kMaxVecsPerThread }}) { {% else %} if (D <= {{ 128 * kMaxVecsPerThread }}) { {% endif %} // Stay under used_shared_kb of shared memory (V100: 64 KB; A100: 96 KB), BT_block_size must be a power of two. while (BT_block_size * sizeof(at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }} >= used_shared_bytes) { BT_block_size /= 2; } TORCH_CHECK(BT_block_size >= 1); if (std::is_same<{{ "scalar_t" if dense else "emb_t" }}, double>::value) { // Otherwise we see CUDA kernel launch failures despite the above checks. 
          BT_block_size = 1;
        }
        auto long_run_ids = at::empty_like(sorted_linear_indices_run_lengths);
        auto num_long_run_ids = at::zeros({1}, indices.options().dtype(at::kLong));
        hipLaunchKernelGGL(
            (split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_find_long_segments),
            dim3(div_round_up(sorted_linear_indices_run_lengths.numel(), kMaxThreads)),
            dim3(kMaxThreads),
            0,
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            sorted_linear_indices_num_runs.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            sorted_linear_indices_run_lengths.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            num_long_run_ids.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            max_segment_length_per_warp);
        C10_HIP_KERNEL_LAUNCH_CHECK();
        // Check https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-7-x
        // "Compute capability 7.x devices allow a single thread block to
        // address the full capacity of shared memory: 96 KB on Volta,
        // 64 KB on Turing. Kernels relying on shared memory allocations
        // over 48 KB per block are architecture-specific, as such they
        // must use dynamic shared memory (rather than statically sized
        // arrays) and require an explicit opt-in using hipFuncSetAttribute()".
        #ifndef __HIP_PLATFORM_HCC__
        hipFuncSetAttribute(
            split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1<
                {% if not dense %}
                emb_t,
                grad_t,
                cache_t,
                {% else %}
                scalar_t,
                at::acc_type<scalar_t, true>,
                scalar_t,
                {% endif %}
                {{ kMaxVecsPerThread }}>,
            hipFuncAttributeMaxDynamicSharedMemorySize,
            used_shared_bytes); // V100: 64 KB; A100: 96 KB.
        #endif
        C10_HIP_KERNEL_LAUNCH_CHECK();
        // dividing by kMaxThreads is a heuristic to avoid num of blocks far exceeding num_long_run_ids[0]
        hipLaunchKernelGGL(
            (split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1<
                {% if not dense %}
                emb_t,
                grad_t,
                cache_t,
                {% else %}
                scalar_t,
                at::acc_type<scalar_t, true>,
                scalar_t,
                {% endif %}
                {{ kMaxVecsPerThread }}>),
            dim3(div_round_up(long_run_ids.numel(), kMaxThreads)),
            dim3(dim3(kWarpSize, BT_block_size)),
            BT_block_size * sizeof(at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }},
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            grad_output_accessor,
            {% if not dense %}
            dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
            uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
            lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
            weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% else %}
            dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            {% if not nobag %}
            D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% else %}
            B,
            D,
            {% endif %}
            hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            sorted_linear_indices_run.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            sorted_linear_indices_cumulative_run_lengths.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            num_long_run_ids.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            {% if not nobag %}
            infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% else %}
            infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            {% if not dense %}
            lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            {% if weighted %}
            indice_weights_sorted.packed_accessor32<at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, at::RestrictPtrTraits>(),
            {% endif %}
            {% if not dense %}
            stochastic_rounding,
            rng_engine_inputs,
            {% else %}
            grad_dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            {% if not nobag %}
            FixedDivisor(B),
            {% endif %}
            {{ args.split_kernel_arg_constructors | join(", ") }});
        C10_HIP_KERNEL_LAUNCH_CHECK();
        #ifndef __HIP_PLATFORM_HCC__
        hipFuncSetAttribute(
            split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1<
                {% if not dense %}
                emb_t,
                grad_t,
                cache_t,
                {% else %}
                scalar_t,
                at::acc_type<scalar_t, true>,
                scalar_t,
                {% endif %}
                {{ kMaxVecsPerThread }}>,
            hipFuncAttributeMaxDynamicSharedMemorySize,
            used_shared_bytes); // V100: 64 KB; A100: 96 KB.
        #endif
        C10_HIP_KERNEL_LAUNCH_CHECK();
        hipLaunchKernelGGL(
            (split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1<
                {% if not dense %}
                emb_t,
                grad_t,
                cache_t,
                {% else %}
                scalar_t,
                at::acc_type<scalar_t, true>,
                scalar_t,
                {% endif %}
                {{ kMaxVecsPerThread }}>),
            dim3(div_round_up(sorted_linear_indices_run.numel(), kBackwardMaxThreads / kWarpSize)),
            dim3(dim3(kWarpSize, kBackwardMaxThreads / kWarpSize)),
            BT_block_size * sizeof(
                at::acc_type<
                    {% if not dense %}
                    cache_t
                    {% else %}
                    scalar_t
                    {% endif %},
                    true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }},
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            grad_output_accessor,
            {% if not dense %}
            dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
            uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
            lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
            weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% else %}
            dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            {% if not nobag %}
            D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% else %}
            B,
            D,
            {% endif %}
            hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            sorted_linear_indices_run.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            sorted_linear_indices_cumulative_run_lengths.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% if not nobag %}
            infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% else %}
            infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            {% if not dense %}
            lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            {% if weighted %}
            indice_weights_sorted.packed_accessor32<at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, at::RestrictPtrTraits>(),
            {% endif %}
            sorted_linear_indices_num_runs.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
            max_segment_length_per_warp,
            {% if not dense %}
            stochastic_rounding,
            rng_engine_inputs,
            {% else %}
            grad_dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(),
            {% endif %}
            {% if not nobag %}
            FixedDivisor(B),
            {% endif %}
            {{ args.split_kernel_arg_constructors | join(", ") }});
        C10_HIP_KERNEL_LAUNCH_CHECK();
        return;
      }
      {% endfor %}
    });
  return {{ "grad_dev_weights" if dense else "" }};
}
{% endif %}
{% endfor %}
// clang-format on
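One detail of the hipified launches above (a hedged aside, not part of the generated source): when the launched kernel is a template whose argument list contains commas, hipLaunchKernelGGL needs the kernel name wrapped in its own parentheses, because the C preprocessor does not parse template angle brackets and would otherwise split the name at each comma into separate macro arguments. A small illustrative sketch with a made-up kernel "axpy":

#include <hip/hip_runtime.h>

template <typename T, int kUnroll>
__global__ void axpy(int n, T a, const T* x, T* y) {
  int base = (blockIdx.x * blockDim.x + threadIdx.x) * kUnroll;
  for (int u = 0; u < kUnroll; ++u) {
    if (base + u < n) y[base + u] += a * x[base + u];
  }
}

void launch_axpy(int n, const float* d_x, float* d_y, hipStream_t stream) {
  int threads = 256;
  int blocks = (n + threads * 4 - 1) / (threads * 4);
  // Parenthesized, axpy<float, 4> is a single macro argument:
  hipLaunchKernelGGL((axpy<float, 4>), dim3(blocks), dim3(threads), 0, stream,
                     n, 2.0f, d_x, d_y);
  // Without the inner parentheses, the preprocessor would split the name at
  // the comma into "axpy<float" and "4>" and the launch would not compile.
}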
269a479eaf78cf81b4caf382c690b16f3ad8da06.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // clang-format off {% set wdesc = "weighted" if weighted else "unweighted" %} #include "fbgemm_gpu/embedding_backward_template_helpers.cuh" #include "fbgemm_gpu/split_embeddings_utils.cuh" {% if not dense %} constexpr int32_t kCacheLocationMissing = -1; {% endif %} constexpr size_t kBackwardMaxThreads = 512; using Tensor = at::Tensor; using namespace fbgemm_gpu; __global__ __launch_bounds__(kMaxThreads) void split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_find_long_segments( const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_num_runs, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_run_lengths, at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> long_run_ids, at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> num_long_run_ids, int32_t max_segment_length_per_warp) { const int32_t num_runs = sorted_linear_indices_num_runs[0]; for (auto run_id = blockIdx.x * blockDim.x + threadIdx.x; run_id < num_runs; run_id += blockDim.x * gridDim.x) { if (sorted_linear_indices_run_lengths[run_id] >= max_segment_length_per_warp) { auto long_run_idx = gpuAtomicIncrement(&num_long_run_ids[0]); long_run_ids[long_run_idx] = run_id; } } } template <typename grad_t> __global__ __launch_bounds__(kMaxThreads) void grad_mean_kernel( const at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> offsets, at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output_mean) { int32_t B = grad_output.size(0); int32_t T = D_offsets.size(0) - 1; int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b = b_t % B; int32_t t = b_t / B; if (b_t >= B * T) { return; } int32_t D_start = D_offsets[t]; int32_t D_end = D_offsets[t + 1]; int32_t D = D_end - D_start; int64_t indices_start = offsets[t * B + b]; int64_t indices_end = offsets[t * B + b + 1]; int32_t L = indices_end - indices_start; if (L != 0) { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<grad_t> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.mul_(1.0 / L); grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } else { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<grad_t> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } } {% for nobag in [True, False] %} {% if not nobag or not weighted %} template < typename emb_t, typename grad_t, typename cache_t, size_t kMaxVecsPerThread> __global__ __launch_bounds__(kMaxThreads) void split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1( const at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output, at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights, {% if not dense %} at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights, at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> weights_placements, {% endif %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets, {% if not nobag 
%} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, {% else %} int32_t B, int64_t D, {% endif %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> hash_size_cumsum, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_linear_indices_run, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> long_run_ids, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> num_long_run_ids, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos, {% else %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos, {% endif %} {% if not dense %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights, {% endif %} {% if not dense %} bool stochastic_rounding, at::PhiloxCudaState stochastic_rounding_philox_args, {% else %} at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights, {% endif %} {% if not nobag %} FixedDivisor fd, {% endif %} {{ args.split_kernel_args | join(", ") }}) { {% if not nobag %} int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); {% else %} int32_t T = weights_offsets.size(0); {% endif %} const int32_t num_long_runs = num_long_run_ids[0]; for (int32_t long_run_id = blockIdx.x; long_run_id < num_long_runs; long_run_id += gridDim.x) { int32_t current_run_id = long_run_ids[long_run_id]; const int64_t linear_index = sorted_linear_indices_run[current_run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[current_run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[current_run_id + 1]; const int32_t SL = segment_end - segment_start; const int32_t warp_id = threadIdx.y; const int32_t lane_id = threadIdx.x; // Note that with shared embedding tables we can have multiple tables // (i.e. different values of `t` sharing the same segment). // const auto info_0 = sorted_infos[segment_start]; {% if not nobag %} int32_t t_0 = fd.Div(info_0); //info_0 / B; {% else %} int32_t t_0 = info_0 % T; {% endif %} int64_t hash_size = hash_size_cumsum[t_0]; {% if not nobag %} int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; {% endif %} int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = SL_per_warp * warp_id; const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL); Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; {% if not nobag %} int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0; {% else %} int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t l = l_t / T; {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? 
sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { {% if not nobag %} int32_t b_j = shfl_sync(b, j); int32_t D_start_j = shfl_sync(D_start, j); {% else %} int32_t l_j = shfl_sync(l, j); {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight_j = shfl_sync(idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; {% if not nobag %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% else %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]); {% endif %} {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].add_(grad_out_vec); {% endif %} } } } // do shared memory reduction only if we used multiple blocks. if (SL > SL_per_warp) { struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> smem; Vec4T<at::acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer(); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = grad_sum[i]; } __syncthreads(); if (blockDim.y >= 32) { if (warp_id < 16) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 16) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 16) { if (warp_id < 8) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 8) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 8) { if (warp_id < 4) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 4) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 4) { if (warp_id < 2) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 2) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (warp_id == 0) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { grad_sum[i] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 1) * kMaxVecsPerThread * 
kWarpSize]); } } } if (warp_id == 0) { int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]); if (weights_placement == PlacementType::DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == PlacementType::MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} at::acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]); int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == PlacementType::DEVICE) { {{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer; Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + current_run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[lane_id + i * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate qparams from updated weight row qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(shared_weight_update_row, D); weight_row_template.store_qparams(qparams_new); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[lane_id + i * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } } } template < typename emb_t, typename grad_t, typename 
cache_t, size_t kMaxVecsPerThread> __global__ __launch_bounds__(kBackwardMaxThreads) void split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1( const at::PackedTensorAccessor32<grad_t, 2, at::RestrictPtrTraits> grad_output, at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights, {% if not dense %} at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights, at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> weights_placements, {% endif %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, {% else %} int32_t B, int64_t D, {% endif %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> hash_size_cumsum, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_linear_indices_run, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos, {% else %} const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos, {% endif %} {% if not dense %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights, {% endif %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_linear_indices_num_runs, int32_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, at::PhiloxCudaState stochastic_rounding_philox_args, {% else %} at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights, {% endif %} {% if not nobag %} FixedDivisor fd, {% endif %} {{ args.split_kernel_args | join(", ") }}) { {% if not nobag %} int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); {% else %} int32_t T = weights_offsets.size(0); {% endif %} const int32_t run_id = blockIdx.x * blockDim.y + threadIdx.y; if (run_id >= sorted_linear_indices_run.size(0)) { return; } if (run_id >= sorted_linear_indices_num_runs[0]) { return; } const int64_t linear_index = sorted_linear_indices_run[run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[run_id + 1]; const int32_t SL = segment_end - segment_start; if (SL >= max_segment_length_per_warp) { return; } // now, each segment corresponds to exactly one table `t` and row in // that table (`idx`). Thus, we can hoist out some of the book-keeping. const auto info_0 = sorted_infos[segment_start]; {% if not nobag %} int32_t t_0 = fd.Div(info_0); // info_0 / B; {% else %} int32_t t_0 = info_0 % T; {% endif %} int64_t hash_size = hash_size_cumsum[t_0]; {% if not nobag %} int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; {% endif %} int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = 0; const int32_t sl_end = SL; Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; {% if not nobag %} int32_t b_t = sl_j < sl_end ? 
sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = D_offsets[t]; {% else %} int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t l = l_t / T; {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { {% if not nobag %} int32_t b_j = shfl_sync(b, j); int32_t D_start_j = shfl_sync(D_start, j); {% else %} int32_t l_j = shfl_sync(l, j); {% endif %} {% if weighted %} at::acc_type<cache_t, true> idx_weight_j = shfl_sync(idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; {% if not nobag %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% else %} Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]); {% endif %} {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].add_(grad_out_vec); {% endif %} } } } int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]); if (weights_placement == PlacementType::DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == PlacementType::MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} at::acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]); int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == PlacementType::DEVICE) { {{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer; Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. 
auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights){ qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate new qparams after row update qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kWarpSize], D); weight_row_template.store_qparams(qparams_new); // fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } {{ "void" if not dense else "Tensor" }} split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_exact_cuda( Tensor grad_output, Tensor dev_weights, {% if not dense %} Tensor uvm_weights, Tensor lxu_cache_weights, Tensor weights_placements, {% endif %} Tensor weights_offsets, {% if not nobag %} Tensor D_offsets, int64_t max_D, {% else %} int64_t D, {% endif %} Tensor hash_size_cumsum, int64_t total_hash_size_bits, Tensor indices, Tensor offsets, {% if not nobag %} int64_t pooling_mode, {% endif %} {% if weighted %} Tensor indice_weights, {% endif %} {% if not dense %} Tensor lxu_cache_locations, {% endif %} int64_t unused_, int64_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, {% endif %} {{ args.split_function_args | join(", ") }}) { TENSOR_ON_CUDA_GPU(grad_output); TENSOR_ON_CUDA_GPU(dev_weights); {% if not dense %} TENSOR_ON_CUDA_GPU(uvm_weights); TENSOR_ON_CUDA_GPU(lxu_cache_weights); TENSOR_ON_CUDA_GPU(weights_placements); {% endif %} TENSOR_ON_CUDA_GPU(weights_offsets); {% if not nobag %} TENSOR_ON_CUDA_GPU(D_offsets); {% endif %} TENSOR_ON_CUDA_GPU(hash_size_cumsum); TENSOR_ON_CUDA_GPU(indices); TENSOR_ON_CUDA_GPU(offsets); {% if weighted %} TENSOR_ON_CUDA_GPU(indice_weights); {% endif %} {% if not dense %} TENSOR_ON_CUDA_GPU(lxu_cache_locations); {% endif %} at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(dev_weights.get_device()); {% if dense %} auto 
grad_dev_weights = zeros_like(dev_weights); {% endif %} // short-circuit if there are zero indices. if (indices.numel() == 0) { return {{ "grad_dev_weights" if dense else "" }}; } {% if not nobag %} int32_t T = D_offsets.numel() - 1; {% else %} int32_t T = weights_offsets.numel(); {% endif %} TORCH_CHECK(T > 0); // offsets = [B x T + 1] const auto B = (offsets.size(0) - 1) / T; TORCH_CHECK(B > 0); auto BT_block_size = kMaxThreads / kWarpSize; TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads); {% if not nobag %} TORCH_CHECK(max_D <= {{ max_embedding_dim }}); {% else %} TORCH_CHECK(D <= {{ max_embedding_dim }}); {% endif %} // V100: 96 KB; A100: 160 KB. int max_shared_bytes = 0; #ifndef __HIP_PLATFORM_HCC__ cudaDeviceGetAttribute(&max_shared_bytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, dev_weights.get_device()); #else // MI100 has 64 KB local memory (shared memory) per workgroup max_shared_bytes = 64 << 10; #endif C10_CUDA_KERNEL_LAUNCH_CHECK(); int shared_kb = max_shared_bytes >> 10; // V100: 64 KB; A100: 96 KB. #ifndef __HIP_PLATFORM_HCC__ // Use 2/3 of the available GPU shared mem; leave rooms for L1$. int used_shared_kb = round_down(shared_kb * 2 / 3, 16); TORCH_CHECK(used_shared_kb > 0); #else // MI100 has independent shared mem and L1 int used_shared_kb = shared_kb; #endif int used_shared_bytes = used_shared_kb << 10; Tensor linear_indices, linear_indices_sorted; Tensor infos_sorted; Tensor sorted_linear_indices_run, sorted_linear_indices_run_lengths, sorted_linear_indices_num_runs, sorted_linear_indices_cumulative_run_lengths; std::tie( linear_indices, linear_indices_sorted, infos_sorted, sorted_linear_indices_run, sorted_linear_indices_run_lengths, sorted_linear_indices_num_runs, sorted_linear_indices_cumulative_run_lengths) = transpose_embedding_input( hash_size_cumsum, total_hash_size_bits, indices, offsets, {{"true" if nobag else "false"}}); {% if not dense %} auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations); if (lxu_cache_locations.size(0) > 0) { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(radix_sort_pairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(at::kByte)); AT_CUDA_CHECK(radix_sort_pairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); } {% endif %} {% if not dense %} DISPATCH_EMB_GRAD_CACHE_TYPES( dev_weights.scalar_type(), grad_output.scalar_type(), lxu_cache_weights.scalar_type(), {% else %} AT_DISPATCH_FLOATING_TYPES_AND_HALF( dev_weights.scalar_type(), {% endif %} "split_embedding_backward_{{ optimizer }}_exact_kernel", [&] { {% if weighted %} auto indice_weights_sorted = at::empty_like(indice_weights); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(radix_sort_pairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<at::acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<cache_t, true>>(), {% else %} 
indice_weights.data_ptr<at::acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(at::kByte)); AT_CUDA_CHECK(radix_sort_pairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<at::acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<cache_t, true>>(), {% else %} indice_weights.data_ptr<at::acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<at::acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); } {% endif %} // early memory release linear_indices.reset(); linear_indices_sorted.reset(); auto grad_output_accessor = grad_output.packed_accessor32< {{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}, 2, at::RestrictPtrTraits>(); {% if not nobag %} Tensor grad_output_mean; if (static_cast<PoolingMode>(pooling_mode) == PoolingMode::MEAN) { grad_output_mean = at::empty_like(grad_output); grad_mean_kernel<{{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}> <<<div_round_up((B * T), kMaxThreads / kWarpSize), dim3(kWarpSize, kMaxThreads / kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( grad_output_accessor, D_offsets .packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), offsets .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), grad_output_mean.packed_accessor32< {{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}, 2, at::RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); grad_output_accessor = grad_output_mean.packed_accessor32< {{ "at::acc_type<scalar_t, true>" if dense else "grad_t" }}, 2, at::RestrictPtrTraits>(); } {% endif %} {% if not dense %} at::PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } {% endif %} {% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %} {% if not nobag %} if (max_D <= {{ 128 * kMaxVecsPerThread }}) { {% else %} if (D <= {{ 128 * kMaxVecsPerThread }}) { {% endif %} // Stay under used_shared_kb of shared memory (V100: 64 KB; A100: 96 KB), BT_block_size must be a power of two. while (BT_block_size * sizeof(at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }} >= used_shared_bytes) { BT_block_size /= 2; } TORCH_CHECK(BT_block_size >= 1); if (std::is_same<{{ "scalar_t" if dense else "emb_t" }}, double>::value) { // Otherwise we see CUDA kernel launch failures despite the above checks. 
BT_block_size = 1; } auto long_run_ids = at::empty_like(sorted_linear_indices_run_lengths); auto num_long_run_ids = at::zeros({1}, indices.options().dtype(at::kLong)); split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_find_long_segments<<< div_round_up(sorted_linear_indices_run_lengths.numel(), kMaxThreads), kMaxThreads, 0, at::cuda::getCurrentCUDAStream() >>>( sorted_linear_indices_num_runs.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), sorted_linear_indices_run_lengths.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), num_long_run_ids.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), max_segment_length_per_warp); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Check https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-7-x // "Compute capability 7.x devices allow a single thread block to // address the full capacity of shared memory: 96 KB on Volta, // 64 KB on Turing. Kernels relying on shared memory allocations // over 48 KB per block are architecture-specific, as such they // must use dynamic shared memory (rather than statically sized // arrays) and require an explicit opt-in using cudaFuncSetAttribute()". #ifndef __HIP_PLATFORM_HCC__ cudaFuncSetAttribute( split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1< {% if not dense %} emb_t, grad_t, cache_t, {% else %} scalar_t, at::acc_type<scalar_t, true>, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>, cudaFuncAttributeMaxDynamicSharedMemorySize, used_shared_bytes); // V100: 64 KB; A100: 96 KB. #endif C10_CUDA_KERNEL_LAUNCH_CHECK(); // dividing by kMaxThreads is a heuristic to avoid num of blocks far exceeding num_long_run_ids[0] split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1< {% if not dense %} emb_t, grad_t, cache_t, {% else %} scalar_t, at::acc_type<scalar_t, true>, scalar_t, {% endif %} {{ kMaxVecsPerThread }}> <<<div_round_up(long_run_ids.numel(), kMaxThreads), dim3(kWarpSize, BT_block_size), BT_block_size * sizeof(at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }}, at::cuda::getCurrentCUDAStream()>>>( grad_output_accessor, {% if not dense %} dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(), uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(), lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(), weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% else %} dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(), {% endif %} weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), {% if not nobag %} D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% else %} B, D, {% endif %} hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), sorted_linear_indices_run .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), sorted_linear_indices_cumulative_run_lengths .packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), num_long_run_ids.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), {% if not nobag %} infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% else %} infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), {% endif %} {% if not dense %} lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, 
at::RestrictPtrTraits>(), {% endif %} {% if weighted %} indice_weights_sorted.packed_accessor32<at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, at::RestrictPtrTraits>(), {% endif %} {% if not dense %} stochastic_rounding, rng_engine_inputs, {% else %} grad_dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(), {% endif %} {% if not nobag %} FixedDivisor(B), {% endif %} {{ args.split_kernel_arg_constructors | join(", ") }}); C10_CUDA_KERNEL_LAUNCH_CHECK(); #ifndef __HIP_PLATFORM_HCC__ cudaFuncSetAttribute( split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1< {% if not dense %} emb_t, grad_t, cache_t, {% else %} scalar_t, at::acc_type<scalar_t, true>, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>, cudaFuncAttributeMaxDynamicSharedMemorySize, used_shared_bytes); // V100: 64 KB; A100: 96 KB. #endif C10_CUDA_KERNEL_LAUNCH_CHECK(); split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1< {% if not dense %} emb_t, grad_t, cache_t, {% else %} scalar_t, at::acc_type<scalar_t, true>, scalar_t, {% endif %} {{ kMaxVecsPerThread }}> <<<div_round_up(sorted_linear_indices_run.numel(), kBackwardMaxThreads / kWarpSize), dim3(kWarpSize, kBackwardMaxThreads / kWarpSize), BT_block_size * sizeof( at::acc_type< {% if not dense %} cache_t {% else %} scalar_t {% endif %}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }}, at::cuda::getCurrentCUDAStream()>>>( grad_output_accessor, {% if not dense %} dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(), uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(), lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(), weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% else %} dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(), {% endif %} weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), {% if not nobag %} D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% else %} B, D, {% endif %} hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), sorted_linear_indices_run .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), sorted_linear_indices_cumulative_run_lengths .packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% if not nobag %} infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% else %} infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), {% endif %} {% if not dense %} lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), {% endif %} {% if weighted %} indice_weights_sorted.packed_accessor32<at::acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, at::RestrictPtrTraits>(), {% endif %} sorted_linear_indices_num_runs .packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), max_segment_length_per_warp, {% if not dense %} stochastic_rounding, rng_engine_inputs, {% else %} grad_dev_weights.packed_accessor64<scalar_t, 1, at::RestrictPtrTraits>(), {% endif %} {% if not nobag %} FixedDivisor(B), {% endif %} {{ args.split_kernel_arg_constructors | join(", ") }}); C10_CUDA_KERNEL_LAUNCH_CHECK(); return; } {% endfor %} }); return {{ "grad_dev_weights" if dense else "" }}; } {% endif %} {% endfor %} // clang-format on
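A note on the code-generation pattern that closes the template above: the Jinja2 loop {% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %} is unrolled when the template is rendered, emitting one dispatch branch per statically instantiated kernel (128 = 4 elements per Vec4T times the 32 lanes of a warp). Below is a minimal self-contained sketch of the generated dispatch shape — demo_kernel and dispatch are hypothetical names, and it assumes max_embedding_dim == 256 so only k = 1, 2 exist; this is not the exact generated code.

#include <cuda_runtime.h>
#include <cstdio>

// One kernel instantiation per dimension bucket: binding kMaxVecsPerThread at
// compile time lets per-thread arrays like grad_sum[kMaxVecsPerThread] above
// have a static size and live in registers.
template <int kMaxVecsPerThread>
__global__ void demo_kernel(int D) {
    if (blockIdx.x == 0 && threadIdx.x == 0)
        printf("instantiated with kMaxVecsPerThread=%d for D=%d\n",
               kMaxVecsPerThread, D);
}

// Shape of the rendered template: a chain of `if (max_D <= 128*k) { ...; return; }`
// branches, exactly what the `{% for %}` block above expands into.
void dispatch(int max_D) {
    if (max_D <= 128) { demo_kernel<1><<<1, 32>>>(max_D); return; }
    if (max_D <= 256) { demo_kernel<2><<<1, 32>>>(max_D); return; }
}

int main() {
    dispatch(100);
    dispatch(200);
    cudaDeviceSynchronize();
    return 0;
}

The trade-off is binary size (one kernel per supported bucket) in exchange for keeping the gradient accumulator entirely in registers.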
247ceb12ae27e806cc34d23750a8d6a6457d4a3c.hip
// !!! This is a file automatically generated by hipify!!!
// Source: http://docs.nvidia.com/cuda/hiprand/index.html
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <iomanip>
#include <cstdio>
#include <getopt.h>

// we could vary M & N to find the perf sweet spot

struct estimate_pi : public thrust::unary_function<unsigned int, float>
{
  __device__
  float operator()(unsigned int thread_id)
  {
    float sum = 0;
    unsigned int N = 10000; // samples per thread

    unsigned int seed = thread_id;

    hiprandState_t s;

    // seed a random number generator
    hiprand_init(seed, 0, 0, &s);

    // take N samples in a quarter circle
    for(unsigned int i = 0; i < N; ++i)
    {
      // draw a sample from the unit square
      float x = hiprand_uniform(&s);
      float y = hiprand_uniform(&s);

      // measure distance from the origin
      float dist = sqrtf(x*x + y*y);

      // add 1.0f if (u0,u1) is inside the quarter circle
      if(dist <= 1.0f)
        sum += 1.0f;
    }

    // multiply by 4 to get the area of the whole circle
    sum *= 4.0f;

    // divide by N
    return sum / N;
  }
};

struct estimate_pi_d : public thrust::unary_function<unsigned int, double>
{
  __device__
  double operator()(unsigned int thread_id)
  {
    double sum = 0;
    unsigned int N = 10000; // samples per thread

    unsigned int seed = thread_id;

    hiprandState_t s;

    // seed a random number generator
    hiprand_init(seed, 0, 0, &s);

    // take N samples in a quarter circle
    for(unsigned int i = 0; i < N; ++i)
    {
      // draw a sample from the unit square (double-resolution generator and
      // double-precision sqrt/constants, so this path actually differs from
      // the single-precision functor above)
      double x = hiprand_uniform_double(&s);
      double y = hiprand_uniform_double(&s);

      // measure distance from the origin
      double dist = sqrt(x*x + y*y);

      // add 1.0 if (u0,u1) is inside the quarter circle
      if(dist <= 1.0)
        sum += 1.0;
    }

    // multiply by 4 to get the area of the whole circle
    sum *= 4.0;

    // divide by N
    return sum / N;
  }
};

int main(int argc, char **argv)
{
  int dp = 0;
  int c;
  while((c = getopt(argc, argv, "d")) != -1){
    switch(c){
      case 'd':
        dp = 1;
        printf("Run with double precision\n");
        break;
      default:
        dp = 0;
        printf("Run with single precision\n");
        break;
    }
  }

  // use 30K independent seeds
  int M = 30000;

  if(!dp){
    float estimate = thrust::transform_reduce(
        thrust::counting_iterator<int>(0),
        thrust::counting_iterator<int>(M),
        estimate_pi(),
        0.0f,
        thrust::plus<float>());
    estimate /= M;

    std::cout << std::setprecision(4);
    std::cout << "pi is approximately ";
    std::cout << estimate << std::endl;
  }else{
    double estimate = thrust::transform_reduce(
        thrust::counting_iterator<int>(0),
        thrust::counting_iterator<int>(M),
        estimate_pi_d(),
        0.0,
        thrust::plus<double>());
    estimate /= M;

    std::cout << std::setprecision(4);
    std::cout << "pi is approximately ";
    std::cout << estimate << std::endl;
  }
  return 0;
}
247ceb12ae27e806cc34d23750a8d6a6457d4a3c.cu
// Source: http://docs.nvidia.com/cuda/curand/index.html
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <curand_kernel.h>
#include <iostream>
#include <iomanip>
#include <cstdio>
#include <getopt.h>

// we could vary M & N to find the perf sweet spot

struct estimate_pi : public thrust::unary_function<unsigned int, float>
{
  __device__
  float operator()(unsigned int thread_id)
  {
    float sum = 0;
    unsigned int N = 10000; // samples per thread

    unsigned int seed = thread_id;

    curandState s;

    // seed a random number generator
    curand_init(seed, 0, 0, &s);

    // take N samples in a quarter circle
    for(unsigned int i = 0; i < N; ++i)
    {
      // draw a sample from the unit square
      float x = curand_uniform(&s);
      float y = curand_uniform(&s);

      // measure distance from the origin
      float dist = sqrtf(x*x + y*y);

      // add 1.0f if (u0,u1) is inside the quarter circle
      if(dist <= 1.0f)
        sum += 1.0f;
    }

    // multiply by 4 to get the area of the whole circle
    sum *= 4.0f;

    // divide by N
    return sum / N;
  }
};

struct estimate_pi_d : public thrust::unary_function<unsigned int, double>
{
  __device__
  double operator()(unsigned int thread_id)
  {
    double sum = 0;
    unsigned int N = 10000; // samples per thread

    unsigned int seed = thread_id;

    curandState s;

    // seed a random number generator
    curand_init(seed, 0, 0, &s);

    // take N samples in a quarter circle
    for(unsigned int i = 0; i < N; ++i)
    {
      // draw a sample from the unit square (double-resolution generator and
      // double-precision sqrt/constants, so this path actually differs from
      // the single-precision functor above)
      double x = curand_uniform_double(&s);
      double y = curand_uniform_double(&s);

      // measure distance from the origin
      double dist = sqrt(x*x + y*y);

      // add 1.0 if (u0,u1) is inside the quarter circle
      if(dist <= 1.0)
        sum += 1.0;
    }

    // multiply by 4 to get the area of the whole circle
    sum *= 4.0;

    // divide by N
    return sum / N;
  }
};

int main(int argc, char **argv)
{
  int dp = 0;
  int c;
  while((c = getopt(argc, argv, "d")) != -1){
    switch(c){
      case 'd':
        dp = 1;
        printf("Run with double precision\n");
        break;
      default:
        dp = 0;
        printf("Run with single precision\n");
        break;
    }
  }

  // use 30K independent seeds
  int M = 30000;

  if(!dp){
    float estimate = thrust::transform_reduce(
        thrust::counting_iterator<int>(0),
        thrust::counting_iterator<int>(M),
        estimate_pi(),
        0.0f,
        thrust::plus<float>());
    estimate /= M;

    std::cout << std::setprecision(4);
    std::cout << "pi is approximately ";
    std::cout << estimate << std::endl;
  }else{
    double estimate = thrust::transform_reduce(
        thrust::counting_iterator<int>(0),
        thrust::counting_iterator<int>(M),
        estimate_pi_d(),
        0.0,
        thrust::plus<double>());
    estimate /= M;

    std::cout << std::setprecision(4);
    std::cout << "pi is approximately ";
    std::cout << estimate << std::endl;
  }
  return 0;
}
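Written out, the estimator both versions of this pair implement is a plain Monte Carlo average: each of the M = 30000 functor invocations draws N = 10000 points in the unit square and returns 4 · (hits/N), and the host reports the mean of the per-thread estimates:

\[
  \hat{\pi} \;=\; \frac{1}{M}\sum_{t=1}^{M} \frac{4}{N}\sum_{i=1}^{N}
  \mathbf{1}\!\left[\,x_{t,i}^{2}+y_{t,i}^{2}\le 1\,\right],
  \qquad (x_{t,i},\,y_{t,i}) \sim \mathcal{U}[0,1]^{2}.
\]

Since \(\Pr[x^{2}+y^{2}\le 1] = \pi/4\) for uniform points in the unit square, the estimator is unbiased, and its standard error \(4\sqrt{p(1-p)/(MN)}\) with \(p=\pi/4\) is roughly \(10^{-4}\) for the \(M N = 3\times 10^{8}\) samples used here — below the resolution of the four digits printed, which is why the single- and double-precision paths report essentially the same value.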
1c3ad00582b34c6d0c3428dbcec6b8dedbbadce6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void return_double_(int n, double *b, const double*a){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) b[i] = 2*a[i]; } void return_double(int n, double *b, const double*a){ hipLaunchKernelGGL(( return_double_), dim3((n+255)/256), dim3(256), 0, 0, n, b, a); }
1c3ad00582b34c6d0c3428dbcec6b8dedbbadce6.cu
#include "cuda.h" __global__ void return_double_(int n, double *b, const double*a){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) b[i] = 2*a[i]; } void return_double(int n, double *b, const double*a){ return_double_<<<(n+255)/256, 256>>>(n, b, a); }
e444fe6d2686f969ca21f2ad50725f771c198fb2.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <vector> #include <sstream> #include <cmath> #include <mpi.h> //activate mpi #include "dg/algorithm.h" #include "dg/backend/timer.cuh" #include "dg/backend/xspacelib.cuh" #include "dg/backend/interpolation.cuh" #include "netcdf_par.h" //exclude if par netcdf=OFF #include "file/nc_utilities.h" #include "feltor.cuh" /* - the only difference to the feltor_hpc.cu file is that this program uses the MPI backend and the parallel netcdf output - pay attention that both the grid dimensions as well as the output dimensions must be divisible by the mpi process numbers */ typedef dg::MPI_FieldAligned< dg::CylindricalMPIGrid3d<dg::MDVec>, dg::IDMatrix,dg::BijectiveComm< dg::iDVec, dg::DVec >, dg::DVec> DFA; using namespace dg::geo::solovev; int main( int argc, char* argv[]) { ////////////////////////////////setup MPI/////////////////////////////// int provided; MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided); if( provided != MPI_THREAD_FUNNELED) { std::cerr << "wrong mpi-thread environment provided!\n"; return -1; } int periods[3] = {false, false, true}; //non-, non-, periodic int rank, size; MPI_Comm_rank( MPI_COMM_WORLD, &rank); MPI_Comm_size( MPI_COMM_WORLD, &size); #if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA int num_devices=0; hipGetDeviceCount(&num_devices); if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;} int device = rank % num_devices; //assume # of gpus/node is fixed hipSetDevice( device); #endif//cuda int np[3]; if(rank==0) { std::cin>> np[0] >> np[1] >>np[2]; std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" x "<<np[2] << " = "<<size<<std::endl; assert( size == np[0]*np[1]*np[2]); } MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD); MPI_Comm comm; MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm); ////////////////////////Parameter initialisation////////////////////////// Json::Reader reader; Json::Value js, gs; if( argc != 4) { if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [geomfile] [outputfile]\n"; return -1; } else { std::ifstream is(argv[1]); std::ifstream ks(argv[2]); reader.parse(is,js,false); reader.parse(ks,gs,false); } const eule::Parameters p( js); const dg::geo::solovev::GeomParameters gp(gs); if(rank==0)p.display( std::cout); if(rank==0)gp.display( std::cout); std::string input = js.toStyledString(), geom = gs.toStyledString(); ////////////////////////////////set up computations/////////////////////////// double Rmin=gp.R_0-p.boxscaleRm*gp.a; double Zmin=-p.boxscaleZm*gp.a*gp.elongation; double Rmax=gp.R_0+p.boxscaleRp*gp.a; double Zmax=p.boxscaleZp*gp.a*gp.elongation; //Make grids dg::CylindricalMPIGrid3d<dg::MDVec> grid( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n, p.Nx, p.Ny, p.Nz, p.bc, p.bc, dg::PER, comm); dg::CylindricalMPIGrid3d<dg::MDVec> grid_out( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n_out, p.Nx_out, p.Ny_out, p.Nz_out, p.bc, p.bc, dg::PER, comm); //create RHS if(rank==0)std::cout << "Constructing Feltor...\n"; eule::Feltor<dg::CylindricalMPIGrid3d<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec> feltor( grid, p, gp); //initialize before rolkar! 
if(rank==0)std::cout << "Constructing Rolkar...\n"; eule::Rolkar< dg::CylindricalMPIGrid3d<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec > rolkar( grid, p, gp, feltor.ds(), feltor.dsDIR()); if(rank==0)std::cout << "Done!\n"; /////////////////////The initial field///////////////////////////////////////// //background profile dg::geo::Nprofile<Psip> prof(p.bgprofamp, p.nprofileamp, gp, Psip(gp)); //initial background profile std::vector<dg::MDVec> y0(4, dg::evaluate( prof, grid)), y1(y0); //perturbation dg::GaussianZ gaussianZ( 0., p.sigma_z*M_PI, 1); //modulation along fieldline if( p.mode == 0 || p.mode == 1) { dg::Gaussian init0( gp.R_0+p.posX*gp.a, p.posY*gp.a, p.sigma, p.sigma, p.amp); if( p.mode == 0) y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 3); //rounds =3 ->2*3-1 if( p.mode == 1) y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); //rounds =1 ->2*1-1 } if( p.mode == 2) { dg::BathRZ init0(16,16,p.Nz,Rmin,Zmin, 30.,5.,p.amp); y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } if( p.mode == 3) { dg::geo::ZonalFlow<Psip> init0(p.amp, p.k_psi, gp, Psip(gp)); y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } dg::blas1::axpby( 1., y1[1], 1., y0[1]); //sum up background and perturbation dg::blas1::plus(y0[1], -1); //initialize ni-1 if( p.mode == 2 || p.mode == 3) { dg::MDVec damping = dg::evaluate( dg::geo::GaussianProfXDamping<Psip>(Psip(gp), gp), grid); dg::blas1::pointwiseDot(damping, y0[1], y0[1]); //damp with gaussprofdamp } std::cout << "intiialize ne" << std::endl; if( p.initcond == 0) feltor.initializene( y0[1], y0[0]); if( p.initcond == 1) dg::blas1::axpby( 1., y0[1], 0.,y0[0], y0[0]); //set n_e = N_i std::cout << "Done!\n"; dg::blas1::axpby( 0., y0[2], 0., y0[2]); //set Ue = 0 dg::blas1::axpby( 0., y0[3], 0., y0[3]); //set Ui = 0 dg::Karniadakis< std::vector<dg::MDVec> > karniadakis( y0, y0[0].size(), p.eps_time); karniadakis.init( feltor, rolkar, y0, p.dt); /////////////////////////////set up netcdf///////////////////////////////// file::NC_Error_Handle err; int ncid; MPI_Info info = MPI_INFO_NULL; err = nc_create_par( argv[3], NC_NETCDF4|NC_MPIIO|NC_CLOBBER, comm, info, &ncid); //MPI ON //err = nc_create( argv[3],NC_NETCDF4|NC_CLOBBER, &ncid); //MPI OFF err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data()); err = nc_put_att_text( ncid, NC_GLOBAL, "geomfile", geom.size(), geom.data()); int dimids[4], tvarID; { MagneticField c(gp); err = file::define_dimensions( ncid, dimids, &tvarID, grid_out.global()); dg::geo::FieldR<MagneticField> fieldR(c, gp.R_0); dg::geo::FieldZ<MagneticField> fieldZ(c, gp.R_0); dg::geo::FieldP<MagneticField> fieldP(c, gp.R_0); dg::HVec vecR = dg::evaluate( fieldR, grid_out.global()); dg::HVec vecZ = dg::evaluate( fieldZ, grid_out.global()); dg::HVec vecP = dg::evaluate( fieldP, grid_out.global()); int vecID[3]; err = nc_def_var( ncid, "BR", NC_DOUBLE, 3, &dimids[1], &vecID[0]); err = nc_def_var( ncid, "BZ", NC_DOUBLE, 3, &dimids[1], &vecID[1]); err = nc_def_var( ncid, "BP", NC_DOUBLE, 3, &dimids[1], &vecID[2]); err = nc_enddef( ncid); err = nc_put_var_double( ncid, vecID[0], vecR.data()); err = nc_put_var_double( ncid, vecID[1], vecZ.data()); err = nc_put_var_double( ncid, vecID[2], vecP.data()); err = nc_redef(ncid); } //field IDs std::string names[5] = {"electrons", "ions", "Ue", "Ui", "potential"}; int dataIDs[5]; //VARIABLE IDS for( unsigned i=0; i<5; i++) 
err = nc_def_var( ncid, names[i].data(), NC_DOUBLE, 4, dimids, &dataIDs[i]); //energy IDs int EtimeID, EtimevarID; err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID); int energyID, massID, energyIDs[5], dissID, alignedID, dEdtID, accuracyID; err = nc_def_var( ncid, "energy", NC_DOUBLE, 1, &EtimeID, &energyID); err = nc_def_var( ncid, "mass", NC_DOUBLE, 1, &EtimeID, &massID); std::string energies[5] = {"Se", "Si", "Uperp", "Upare", "Upari"}; for( unsigned i=0; i<5; i++) err = nc_def_var( ncid, energies[i].data(), NC_DOUBLE, 1, &EtimeID, &energyIDs[i]); err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID); err = nc_def_var( ncid, "alignment", NC_DOUBLE, 1, &EtimeID, &alignedID); err = nc_def_var( ncid, "dEdt", NC_DOUBLE, 1, &EtimeID, &dEdtID); err = nc_def_var( ncid, "accuracy", NC_DOUBLE, 1, &EtimeID, &accuracyID); //probe vars definition int NepID,phipID; err = nc_def_var( ncid, "Ne_p", NC_DOUBLE, 1, &EtimeID, &NepID); err = nc_def_var( ncid, "phi_p", NC_DOUBLE, 1, &EtimeID, &phipID); for(unsigned i=0; i<5; i++) { err = nc_var_par_access( ncid, energyIDs[i], NC_COLLECTIVE); err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE); } err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, massID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE); err = nc_var_par_access( ncid, alignedID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE); err = nc_var_par_access( ncid, accuracyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, NepID, NC_COLLECTIVE); err = nc_var_par_access( ncid, phipID, NC_COLLECTIVE); err = nc_enddef(ncid); ///////////////////////////////////PROBE////////////////////////////// const dg::HVec Xprobe(1,gp.R_0+p.boxscaleRp*gp.a); const dg::HVec Zprobe(1,0.); const dg::HVec Phiprobe(1,M_PI); dg::IDMatrix probeinterp; int probeRANK = grid.pidOf( Xprobe[0], Zprobe[0], Phiprobe[0]); if(rank==probeRANK) probeinterp=dg::create::interpolation( Xprobe,Zprobe,Phiprobe,grid.local(), dg::NEU); dg::DVec probevalue(1,0.); ///////////////////////////first output///////////////////////////////// if(rank==0)std::cout << "First output ... 
\n"; int dims[3], coords[3]; MPI_Cart_get( comm, 3, dims, periods, coords); size_t count[4] = {1, grid_out.Nz(), grid_out.n()*(grid_out.Ny()), grid_out.n()*(grid_out.Nx())}; size_t start[4] = {0, coords[2]*count[1], coords[1]*count[2], coords[0]*count[3]}; dg::MDVec transfer( dg::evaluate(dg::zero, grid)); dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local())); dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local())); dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix for( unsigned i=0; i<4; i++) { dg::blas2::gemv( interpolate, y0[i].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() ); } transfer = feltor.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data()); double time = 0; err = nc_put_vara_double( ncid, tvarID, start, count, &time); err = nc_put_vara_double( ncid, EtimevarID, start, count, &time); size_t Estart[] = {0}; size_t Ecount[] = {1}; double energy0 = feltor.energy(), mass0 = feltor.mass(), E0 = energy0, mass = mass0, E1 = 0.0, dEdt = 0., diss = 0., aligned=0, accuracy=0.; std::vector<double> evec = feltor.energy_vector(); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &energy0); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass0); for( unsigned i=0; i<5; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); //probe double Nep=0, phip=0; if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep=probevalue[0] ; dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep,1 , MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 , MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "First write successful!\n"; ///////////////////////////////////////Timeloop///////////////////////////////// dg::Timer t; t.tic(); #ifdef DG_BENCHMARK unsigned step = 0; #endif //DG_BENCHMARK for( unsigned i=1; i<=p.maxout; i++) { #ifdef DG_BENCHMARK dg::Timer ti; ti.tic(); #endif//DG_BENCHMARK for( unsigned j=0; j<p.itstp; j++) { try{ karniadakis( feltor, rolkar, y0);} catch( dg::Fail& fail) { if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n"; if(rank==0)std::cerr << "Does Simulation respect CFL condition?"<<std::endl; err = nc_close(ncid); MPI_Finalize(); return -1; } step++; time+=p.dt; Estart[0] = step; E1 = feltor.energy(), mass = feltor.mass(), diss = feltor.energy_diffusion(); dEdt = (E1 - E0)/p.dt; E0 = E1; accuracy = 2.*fabs( (dEdt-diss)/(dEdt + diss)); evec = feltor.energy_vector(); err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &E1); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass); for( unsigned i=0; i<5; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); 
err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep= probevalue[0] ; dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep, 1 ,MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 ,MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (feltor.mass()-mass0)/mass0<<"\t"; if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t"; if(rank==0)std::cout <<" d E/dt = " << dEdt <<" Lambda = " << diss << " -> Accuracy: "<< accuracy << "\n"; } #ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time; if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s"; ti.tic(); #endif//DG_BENCHMARK //err = nc_open_par( argv[3], NC_WRITE|NC_MPIIO, comm, info, &ncid); //dont do it //////////////////////////write fields//////////////////////// start[0] = i; for( unsigned j=0; j<4; j++) { dg::blas2::gemv( interpolate, y0[j].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data()); } transfer = feltor.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() ); err = nc_put_vara_double( ncid, tvarID, start, count, &time); //err = nc_close(ncid); DONT DO IT! #ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Time for output: "<<ti.diff()<<"s\n\n"<<std::flush; #endif//DG_BENCHMARK } t.toc(); unsigned hour = (unsigned)floor(t.diff()/3600); unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60); double second = t.diff() - hour*3600 - minute*60; if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0'); if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n"; if(rank==0)std::cout <<"which is \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n"; err = nc_close(ncid); MPI_Finalize(); return 0; }
e444fe6d2686f969ca21f2ad50725f771c198fb2.cu
#include <iostream> #include <iomanip> #include <vector> #include <sstream> #include <cmath> #include <mpi.h> //activate mpi #include "dg/algorithm.h" #include "dg/backend/timer.cuh" #include "dg/backend/xspacelib.cuh" #include "dg/backend/interpolation.cuh" #include "netcdf_par.h" //exclude if par netcdf=OFF #include "file/nc_utilities.h" #include "feltor.cuh" /* - the only difference to the feltor_hpc.cu file is that this program uses the MPI backend and the parallel netcdf output - pay attention that both the grid dimensions as well as the output dimensions must be divisible by the mpi process numbers */ typedef dg::MPI_FieldAligned< dg::CylindricalMPIGrid3d<dg::MDVec>, dg::IDMatrix,dg::BijectiveComm< dg::iDVec, dg::DVec >, dg::DVec> DFA; using namespace dg::geo::solovev; int main( int argc, char* argv[]) { ////////////////////////////////setup MPI/////////////////////////////// int provided; MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided); if( provided != MPI_THREAD_FUNNELED) { std::cerr << "wrong mpi-thread environment provided!\n"; return -1; } int periods[3] = {false, false, true}; //non-, non-, periodic int rank, size; MPI_Comm_rank( MPI_COMM_WORLD, &rank); MPI_Comm_size( MPI_COMM_WORLD, &size); #if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA int num_devices=0; cudaGetDeviceCount(&num_devices); if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;} int device = rank % num_devices; //assume # of gpus/node is fixed cudaSetDevice( device); #endif//cuda int np[3]; if(rank==0) { std::cin>> np[0] >> np[1] >>np[2]; std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" x "<<np[2] << " = "<<size<<std::endl; assert( size == np[0]*np[1]*np[2]); } MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD); MPI_Comm comm; MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm); ////////////////////////Parameter initialisation////////////////////////// Json::Reader reader; Json::Value js, gs; if( argc != 4) { if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [geomfile] [outputfile]\n"; return -1; } else { std::ifstream is(argv[1]); std::ifstream ks(argv[2]); reader.parse(is,js,false); reader.parse(ks,gs,false); } const eule::Parameters p( js); const dg::geo::solovev::GeomParameters gp(gs); if(rank==0)p.display( std::cout); if(rank==0)gp.display( std::cout); std::string input = js.toStyledString(), geom = gs.toStyledString(); ////////////////////////////////set up computations/////////////////////////// double Rmin=gp.R_0-p.boxscaleRm*gp.a; double Zmin=-p.boxscaleZm*gp.a*gp.elongation; double Rmax=gp.R_0+p.boxscaleRp*gp.a; double Zmax=p.boxscaleZp*gp.a*gp.elongation; //Make grids dg::CylindricalMPIGrid3d<dg::MDVec> grid( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n, p.Nx, p.Ny, p.Nz, p.bc, p.bc, dg::PER, comm); dg::CylindricalMPIGrid3d<dg::MDVec> grid_out( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n_out, p.Nx_out, p.Ny_out, p.Nz_out, p.bc, p.bc, dg::PER, comm); //create RHS if(rank==0)std::cout << "Constructing Feltor...\n"; eule::Feltor<dg::CylindricalMPIGrid3d<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec> feltor( grid, p, gp); //initialize before rolkar! 
if(rank==0)std::cout << "Constructing Rolkar...\n"; eule::Rolkar< dg::CylindricalMPIGrid3d<dg::MDVec>, dg::DS<DFA, dg::MDMatrix, dg::MDVec>, dg::MDMatrix, dg::MDVec > rolkar( grid, p, gp, feltor.ds(), feltor.dsDIR()); if(rank==0)std::cout << "Done!\n"; /////////////////////The initial field///////////////////////////////////////// //background profile dg::geo::Nprofile<Psip> prof(p.bgprofamp, p.nprofileamp, gp, Psip(gp)); //initial background profile std::vector<dg::MDVec> y0(4, dg::evaluate( prof, grid)), y1(y0); //perturbation dg::GaussianZ gaussianZ( 0., p.sigma_z*M_PI, 1); //modulation along fieldline if( p.mode == 0 || p.mode == 1) { dg::Gaussian init0( gp.R_0+p.posX*gp.a, p.posY*gp.a, p.sigma, p.sigma, p.amp); if( p.mode == 0) y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 3); //rounds =3 ->2*3-1 if( p.mode == 1) y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); //rounds =1 ->2*1-1 } if( p.mode == 2) { dg::BathRZ init0(16,16,p.Nz,Rmin,Zmin, 30.,5.,p.amp); y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } if( p.mode == 3) { dg::geo::ZonalFlow<Psip> init0(p.amp, p.k_psi, gp, Psip(gp)); y1[1] = feltor.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } dg::blas1::axpby( 1., y1[1], 1., y0[1]); //sum up background and perturbation dg::blas1::plus(y0[1], -1); //initialize ni-1 if( p.mode == 2 || p.mode == 3) { dg::MDVec damping = dg::evaluate( dg::geo::GaussianProfXDamping<Psip>(Psip(gp), gp), grid); dg::blas1::pointwiseDot(damping, y0[1], y0[1]); //damp with gaussprofdamp } std::cout << "intiialize ne" << std::endl; if( p.initcond == 0) feltor.initializene( y0[1], y0[0]); if( p.initcond == 1) dg::blas1::axpby( 1., y0[1], 0.,y0[0], y0[0]); //set n_e = N_i std::cout << "Done!\n"; dg::blas1::axpby( 0., y0[2], 0., y0[2]); //set Ue = 0 dg::blas1::axpby( 0., y0[3], 0., y0[3]); //set Ui = 0 dg::Karniadakis< std::vector<dg::MDVec> > karniadakis( y0, y0[0].size(), p.eps_time); karniadakis.init( feltor, rolkar, y0, p.dt); /////////////////////////////set up netcdf///////////////////////////////// file::NC_Error_Handle err; int ncid; MPI_Info info = MPI_INFO_NULL; err = nc_create_par( argv[3], NC_NETCDF4|NC_MPIIO|NC_CLOBBER, comm, info, &ncid); //MPI ON //err = nc_create( argv[3],NC_NETCDF4|NC_CLOBBER, &ncid); //MPI OFF err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data()); err = nc_put_att_text( ncid, NC_GLOBAL, "geomfile", geom.size(), geom.data()); int dimids[4], tvarID; { MagneticField c(gp); err = file::define_dimensions( ncid, dimids, &tvarID, grid_out.global()); dg::geo::FieldR<MagneticField> fieldR(c, gp.R_0); dg::geo::FieldZ<MagneticField> fieldZ(c, gp.R_0); dg::geo::FieldP<MagneticField> fieldP(c, gp.R_0); dg::HVec vecR = dg::evaluate( fieldR, grid_out.global()); dg::HVec vecZ = dg::evaluate( fieldZ, grid_out.global()); dg::HVec vecP = dg::evaluate( fieldP, grid_out.global()); int vecID[3]; err = nc_def_var( ncid, "BR", NC_DOUBLE, 3, &dimids[1], &vecID[0]); err = nc_def_var( ncid, "BZ", NC_DOUBLE, 3, &dimids[1], &vecID[1]); err = nc_def_var( ncid, "BP", NC_DOUBLE, 3, &dimids[1], &vecID[2]); err = nc_enddef( ncid); err = nc_put_var_double( ncid, vecID[0], vecR.data()); err = nc_put_var_double( ncid, vecID[1], vecZ.data()); err = nc_put_var_double( ncid, vecID[2], vecP.data()); err = nc_redef(ncid); } //field IDs std::string names[5] = {"electrons", "ions", "Ue", "Ui", "potential"}; int dataIDs[5]; //VARIABLE IDS for( unsigned i=0; i<5; i++) 
err = nc_def_var( ncid, names[i].data(), NC_DOUBLE, 4, dimids, &dataIDs[i]); //energy IDs int EtimeID, EtimevarID; err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID); int energyID, massID, energyIDs[5], dissID, alignedID, dEdtID, accuracyID; err = nc_def_var( ncid, "energy", NC_DOUBLE, 1, &EtimeID, &energyID); err = nc_def_var( ncid, "mass", NC_DOUBLE, 1, &EtimeID, &massID); std::string energies[5] = {"Se", "Si", "Uperp", "Upare", "Upari"}; for( unsigned i=0; i<5; i++) err = nc_def_var( ncid, energies[i].data(), NC_DOUBLE, 1, &EtimeID, &energyIDs[i]); err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID); err = nc_def_var( ncid, "alignment", NC_DOUBLE, 1, &EtimeID, &alignedID); err = nc_def_var( ncid, "dEdt", NC_DOUBLE, 1, &EtimeID, &dEdtID); err = nc_def_var( ncid, "accuracy", NC_DOUBLE, 1, &EtimeID, &accuracyID); //probe vars definition int NepID,phipID; err = nc_def_var( ncid, "Ne_p", NC_DOUBLE, 1, &EtimeID, &NepID); err = nc_def_var( ncid, "phi_p", NC_DOUBLE, 1, &EtimeID, &phipID); for(unsigned i=0; i<5; i++) { err = nc_var_par_access( ncid, energyIDs[i], NC_COLLECTIVE); err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE); } err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, massID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE); err = nc_var_par_access( ncid, alignedID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE); err = nc_var_par_access( ncid, accuracyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, NepID, NC_COLLECTIVE); err = nc_var_par_access( ncid, phipID, NC_COLLECTIVE); err = nc_enddef(ncid); ///////////////////////////////////PROBE////////////////////////////// const dg::HVec Xprobe(1,gp.R_0+p.boxscaleRp*gp.a); const dg::HVec Zprobe(1,0.); const dg::HVec Phiprobe(1,M_PI); dg::IDMatrix probeinterp; int probeRANK = grid.pidOf( Xprobe[0], Zprobe[0], Phiprobe[0]); if(rank==probeRANK) probeinterp=dg::create::interpolation( Xprobe,Zprobe,Phiprobe,grid.local(), dg::NEU); dg::DVec probevalue(1,0.); ///////////////////////////first output///////////////////////////////// if(rank==0)std::cout << "First output ... 
\n"; int dims[3], coords[3]; MPI_Cart_get( comm, 3, dims, periods, coords); size_t count[4] = {1, grid_out.Nz(), grid_out.n()*(grid_out.Ny()), grid_out.n()*(grid_out.Nx())}; size_t start[4] = {0, coords[2]*count[1], coords[1]*count[2], coords[0]*count[3]}; dg::MDVec transfer( dg::evaluate(dg::zero, grid)); dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local())); dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local())); dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix for( unsigned i=0; i<4; i++) { dg::blas2::gemv( interpolate, y0[i].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() ); } transfer = feltor.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data()); double time = 0; err = nc_put_vara_double( ncid, tvarID, start, count, &time); err = nc_put_vara_double( ncid, EtimevarID, start, count, &time); size_t Estart[] = {0}; size_t Ecount[] = {1}; double energy0 = feltor.energy(), mass0 = feltor.mass(), E0 = energy0, mass = mass0, E1 = 0.0, dEdt = 0., diss = 0., aligned=0, accuracy=0.; std::vector<double> evec = feltor.energy_vector(); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &energy0); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass0); for( unsigned i=0; i<5; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); //probe double Nep=0, phip=0; if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep=probevalue[0] ; dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep,1 , MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 , MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "First write successful!\n"; ///////////////////////////////////////Timeloop///////////////////////////////// dg::Timer t; t.tic(); #ifdef DG_BENCHMARK unsigned step = 0; #endif //DG_BENCHMARK for( unsigned i=1; i<=p.maxout; i++) { #ifdef DG_BENCHMARK dg::Timer ti; ti.tic(); #endif//DG_BENCHMARK for( unsigned j=0; j<p.itstp; j++) { try{ karniadakis( feltor, rolkar, y0);} catch( dg::Fail& fail) { if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n"; if(rank==0)std::cerr << "Does Simulation respect CFL condition?"<<std::endl; err = nc_close(ncid); MPI_Finalize(); return -1; } step++; time+=p.dt; Estart[0] = step; E1 = feltor.energy(), mass = feltor.mass(), diss = feltor.energy_diffusion(); dEdt = (E1 - E0)/p.dt; E0 = E1; accuracy = 2.*fabs( (dEdt-diss)/(dEdt + diss)); evec = feltor.energy_vector(); err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &E1); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass); for( unsigned i=0; i<5; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); 
err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep= probevalue[0] ; dg::blas2::gemv(probeinterp,feltor.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep, 1 ,MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 ,MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (feltor.mass()-mass0)/mass0<<"\t"; if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t"; if(rank==0)std::cout <<" d E/dt = " << dEdt <<" Lambda = " << diss << " -> Accuracy: "<< accuracy << "\n"; } #ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time; if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s"; ti.tic(); #endif//DG_BENCHMARK //err = nc_open_par( argv[3], NC_WRITE|NC_MPIIO, comm, info, &ncid); //dont do it //////////////////////////write fields//////////////////////// start[0] = i; for( unsigned j=0; j<4; j++) { dg::blas2::gemv( interpolate, y0[j].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data()); } transfer = feltor.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() ); err = nc_put_vara_double( ncid, tvarID, start, count, &time); //err = nc_close(ncid); DONT DO IT! #ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Time for output: "<<ti.diff()<<"s\n\n"<<std::flush; #endif//DG_BENCHMARK } t.toc(); unsigned hour = (unsigned)floor(t.diff()/3600); unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60); double second = t.diff() - hour*3600 - minute*60; if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0'); if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n"; if(rank==0)std::cout <<"which is \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n"; err = nc_close(ncid); MPI_Finalize(); return 0; }
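One idiom in the pair above deserves a callout: the probe value exists only on the rank that owns the probe point (found via grid.pidOf(...)), so it is computed there and then broadcast, because every rank participates in the subsequent collective write to the parallel netCDF file and would otherwise hold a stale zero. A stripped-down hypothetical sketch of that compute-on-owner-then-broadcast pattern (probe_rank and probe_value are illustrative names):

#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // In the program above: probe_rank = grid.pidOf(Xprobe, Zprobe, Phiprobe)
    const int probe_rank = 0;
    double probe_value = 0.0;
    if (rank == probe_rank)
        probe_value = 3.14; // stand-in for dg::blas2::gemv(probeinterp, field, ...)

    // Every rank needs the value before the collective nc_put_vara_double call.
    MPI_Bcast(&probe_value, 1, MPI_DOUBLE, probe_rank, MPI_COMM_WORLD);
    printf("rank %d sees probe value %f\n", rank, probe_value);

    MPI_Finalize();
    return 0;
}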
5967cb05427dae6118473e58668b141e7f76feec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.

// To help get you started, we have included some useful notes here.

//****************************************************************************

// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.

// That is, instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
//  1) RRRRRRRR...
//  2) GGGGGGGG...
//  3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).

// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.

//****************************************************************************

// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.

// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
//  0.0  0.2  0.0
//  0.2  0.2  0.2
//  0.0  0.2  0.0
//
// Image (note that we align the array of weights to the center of the box):
//
//    1  2  5  2  0  3
//       -------
//    3 |2  5  1| 6  0       0.0*2 + 0.2*5 + 0.0*1 +
//      |       |
//    4 |3  6  2| 1  4   ->  0.2*3 + 0.2*6 + 0.2*2 +   ->  3.2
//      |       |
//    0 |4  0  3| 4  2       0.0*4 + 0.2*0 + 0.0*3
//       -------
//    9  6  5  0  3  9
//
//         (1)                         (2)                 (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.

// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.

//****************************************************************************

// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.

// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.

//****************************************************************************

// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" unsigned char *d_red, *d_green, *d_blue; float *d_filter; __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int global_image_position = y * numCols + x; float result = 0.f; if( x >= numCols || y >= numRows) return; //For every value in the filter around the pixel (c, r) for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) { for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(y + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(x + filter_c, 0), static_cast<int>(numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2]; result += image_value * filter_value; } } outputChannel[global_image_position] = result; // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int global_image_position = y * numCols + x; // check if we are in boundries of the image if( x >= numCols || y >= numRows) return; // separate channels uchar4 temp = inputImageRGBA[global_image_position]; redChannel[global_image_position] = temp.x; greenChannel[global_image_position] = temp.y; blueChannel[global_image_position] = temp.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { int unsigned_size = sizeof(unsigned char) * numRowsImage * numColsImage; int float_size = sizeof(float) * filterWidth * filterWidth; //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, unsigned_size)); checkCudaErrors(hipMalloc(&d_green, unsigned_size)); checkCudaErrors(hipMalloc(&d_blue, unsigned_size)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, float_size)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter,float_size, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const int N = 16; const int M = 16; const dim3 blockSize(N,M,1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numCols/N+1, numRows/M+1,1); //TODO-DONE: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO-DONE: Call your convolution kernel here 3 times, once for each color channel. // RED hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // GREEN hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // BLUE hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
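The file above leans on checkCudaErrors (here, its HIP-ified counterpart) from utils.h, which is not part of this dump. For reference only, a minimal self-contained sketch of what such a wrapper typically looks like; the names and exact behavior here are illustrative assumptions, not the course's actual definition:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Report the failing call site and abort, so the first error is not silently
// carried into every subsequent kernel launch.
#define checkCudaErrors(val) checkCuda((val), #val, __FILE__, __LINE__)

inline void checkCuda(cudaError_t result, const char *expr,
                      const char *file, int line) {
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA error '%s' at %s:%d in '%s'\n",
            cudaGetErrorString(result), file, line, expr);
    exit(EXIT_FAILURE);
  }
}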
5967cb05427dae6118473e58668b141e7f76feec.cu
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is, instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the
// outputChannel. Here is an example of computing a blur, using a weighted
// average, for a single pixel in a small image.
//
// Array of weights:
//
//   0.0  0.2  0.0
//   0.2  0.2  0.2
//   0.0  0.2  0.0
//
// Image (note that we align the array of weights to the center of the box):
//
//   1  2  5  2  0  3
//      -------
//   3 |2  5  1| 6  0      0.0*2 + 0.2*5 + 0.0*1 +
//     |       |
//   4 |3  6  2| 1  4  ->  0.2*3 + 0.2*6 + 0.2*2 +  ->  3.2
//     |       |
//   0 |4  0  3| 4  2      0.0*4 + 0.2*0 + 0.0*3
//      -------
//   9  6  5  0  3  9
//
//         (1)                      (2)                 (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its
// width. We refer to the array of weights as a filter, and we refer to its
// width with the variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called
// checkCudaErrors. You should wrap your allocation and copying statements like
// we've done in the code we're supplying you. Here is an example of the unsafe
// way to allocate memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful
// for catching mistakes. If you write code the unsafe way and you make a
// mistake, then any subsequent kernels won't compute anything, and it will be
// hard to figure out why. Writing code the safe way will inform you as soon as
// you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************

#include "reference_calc.cpp"
#include "utils.h"

unsigned char *d_red, *d_green, *d_blue;
float *d_filter;

__global__ void gaussian_blur(const unsigned char *const inputChannel,
                              unsigned char *const outputChannel,
                              int numRows, int numCols,
                              const float *const filter, const int filterWidth)
{
  // NOTE: Be sure to compute any intermediate results in floating point
  // before storing the final result as unsigned char.
  int x = blockDim.x * blockIdx.x + threadIdx.x;
  int y = blockDim.y * blockIdx.y + threadIdx.y;
  int global_image_position = y * numCols + x;
  float result = 0.f;
  // Be careful not to access memory outside the bounds of the image: threads
  // whose absolute 2D position falls outside the image return early.
  if (x >= numCols || y >= numRows) return;
  // For every value in the filter around the pixel (c, r):
  for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) {
    for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) {
      // Find the global image position for this filter position, clamping to
      // the boundary of the image. A thread whose own pixel is inside the
      // image may still have neighbors outside it; instead of reading such a
      // neighbor (which would be out of bounds), we clamp the coordinates we
      // read, matching the sequential reference solution's clamping semantics.
      int image_r = min(max(y + filter_r, 0), static_cast<int>(numRows - 1));
      int image_c = min(max(x + filter_c, 0), static_cast<int>(numCols - 1));
      float image_value =
          static_cast<float>(inputChannel[image_r * numCols + image_c]);
      float filter_value =
          filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2];
      result += image_value * filter_value;
    }
  }
  outputChannel[global_image_position] = result;
}

//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__ void separateChannels(const uchar4 *const inputImageRGBA,
                                 int numRows, int numCols,
                                 unsigned char *const redChannel,
                                 unsigned char *const greenChannel,
                                 unsigned char *const blueChannel)
{
  const int x = blockDim.x * blockIdx.x + threadIdx.x;
  const int y = blockDim.y * blockIdx.y + threadIdx.y;
  const int global_image_position = y * numCols + x;
  // check that we are within the boundaries of the image
  if (x >= numCols || y >= numRows) return;
  // separate channels
  uchar4 temp = inputImageRGBA[global_image_position];
  redChannel[global_image_position] = temp.x;
  greenChannel[global_image_position] = temp.y;
  blueChannel[global_image_position] = temp.z;
}

//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__ void recombineChannels(const unsigned char *const redChannel,
                                  const unsigned char *const greenChannel,
                                  const unsigned char *const blueChannel,
                                  uchar4 *const outputImageRGBA,
                                  int numRows, int numCols)
{
  const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                                       blockIdx.y * blockDim.y + threadIdx.y);
  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
  //make sure we don't try and access memory outside the image
  //by having any threads mapped there return early
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;
  unsigned char red   = redChannel[thread_1D_pos];
  unsigned char green = greenChannel[thread_1D_pos];
  unsigned char blue  = blueChannel[thread_1D_pos];
  //Alpha should be 255 for no transparency
  uchar4 outputPixel = make_uchar4(red, green, blue, 255);
  outputImageRGBA[thread_1D_pos] = outputPixel;
}

void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float *const h_filter, const size_t filterWidth)
{
  // use size_t for byte counts so that large images cannot overflow an int
  size_t unsigned_size = sizeof(unsigned char) * numRowsImage * numColsImage;
  size_t float_size = sizeof(float) * filterWidth * filterWidth;
  //allocate memory for the three different channels
  checkCudaErrors(cudaMalloc(&d_red,   unsigned_size));
  checkCudaErrors(cudaMalloc(&d_green, unsigned_size));
  checkCudaErrors(cudaMalloc(&d_blue,  unsigned_size));
  //allocate memory for the filter on the GPU, using the pointer d_filter
  //declared above; note that we pass a pointer to a pointer to cudaMalloc
  checkCudaErrors(cudaMalloc(&d_filter, float_size));
  //copy the filter on the host (h_filter) to the memory just allocated on the GPU
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, float_size, cudaMemcpyHostToDevice));
}

void your_gaussian_blur(const uchar4 *const h_inputImageRGBA,
                        uchar4 *const d_inputImageRGBA,
                        uchar4 *const d_outputImageRGBA,
                        const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  //set a reasonable block size (i.e., number of threads per block)
  const int N = 16;
  const int M = 16;
  const dim3 blockSize(N, M, 1);
  //compute the grid size (i.e., number of blocks per kernel launch)
  //from the image size and block size
  const dim3 gridSize(numCols / N + 1, numRows / M + 1, 1);
  //launch a kernel for separating the RGBA image into different color channels
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols,
                                            d_red, d_green, d_blue);
  // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
  // launching your kernel to make sure that you didn't make any mistakes.
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
  //call the convolution kernel three times, once for each color channel
  // RED
  gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols,
                                         d_filter, filterWidth);
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
  // GREEN
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols,
                                         d_filter, filterWidth);
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
  // BLUE
  gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols,
                                         d_filter, filterWidth);
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
  // Now we recombine your results. We take care of launching this kernel for you.
  //
  // NOTE: This kernel launch depends on the gridSize and blockSize variables,
  // which you must set yourself.
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred,
                                             d_blueBlurred, d_outputImageRGBA,
                                             numRows, numCols);
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
}

//Free all the memory that we allocated
void cleanup()
{
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_filter));
}
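The closing note in the assignment text suggests using shared memory and thread cooperation for better performance, but neither version above implements it. The following is a minimal sketch of that idea, not part of the original submission: each block stages a tile plus a halo of filterWidth/2 pixels in dynamic shared memory, then convolves from the tile. It assumes an odd filterWidth and the same clamping semantics as the global-memory version.

__global__ void gaussian_blur_shared(const unsigned char *const inputChannel,
                                     unsigned char *const outputChannel,
                                     int numRows, int numCols,
                                     const float *const filter, const int filterWidth)
{
  extern __shared__ unsigned char tile[];        // (blockDim.x+2r) x (blockDim.y+2r)
  const int r = filterWidth / 2;                 // halo radius
  const int tileW = blockDim.x + 2 * r;
  const int tileH = blockDim.y + 2 * r;
  const int x0 = blockIdx.x * blockDim.x - r;    // tile origin in image coordinates
  const int y0 = blockIdx.y * blockDim.y - r;
  // cooperative load: every thread strides over the whole tile (including halo)
  for (int t = threadIdx.y * blockDim.x + threadIdx.x; t < tileW * tileH;
       t += blockDim.x * blockDim.y) {
    int tx = t % tileW, ty = t / tileW;
    int ix = min(max(x0 + tx, 0), numCols - 1);  // clamp, as in the global version
    int iy = min(max(y0 + ty, 0), numRows - 1);
    tile[t] = inputChannel[iy * numCols + ix];
  }
  __syncthreads();                               // all loads done before any reads
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= numCols || y >= numRows) return;
  float result = 0.f;
  for (int fy = 0; fy < filterWidth; ++fy)
    for (int fx = 0; fx < filterWidth; ++fx)
      result += filter[fy * filterWidth + fx] *
                tile[(threadIdx.y + fy) * tileW + (threadIdx.x + fx)];
  outputChannel[y * numCols + x] = result;
}

A launch would pass the tile size as the dynamic shared-memory argument, e.g. gaussian_blur_shared<<<gridSize, blockSize, (N + filterWidth - 1) * (M + filterWidth - 1)>>>(...), with N and M as in your_gaussian_blur.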
e632cc3d91335448464ea92024200347e646e473.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void kernel_forwardDVF(float *mx, float *my, float *mz,
                                  hipTextureObject_t alpha_x,
                                  hipTextureObject_t alpha_y,
                                  hipTextureObject_t alpha_z,
                                  hipTextureObject_t beta_x,
                                  hipTextureObject_t beta_y,
                                  hipTextureObject_t beta_z,
                                  float volume, float flow,
                                  int nx, int ny, int nz)
{
    int ix = 16 * blockIdx.x + threadIdx.x;
    int iy = 16 * blockIdx.y + threadIdx.y;
    int iz = 4 * blockIdx.z + threadIdx.z;
    if (ix >= nx || iy >= ny || iz >= nz)
        return;
    int id = ix + iy * nx + iz * nx * ny;
    mx[id] = tex3D<float>(alpha_x, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * volume +
             tex3D<float>(beta_x, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * flow;
    my[id] = tex3D<float>(alpha_y, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * volume +
             tex3D<float>(beta_y, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * flow;
    mz[id] = tex3D<float>(alpha_z, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * volume +
             tex3D<float>(beta_z, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * flow;
}
e632cc3d91335448464ea92024200347e646e473.cu
__global__ void kernel_forwardDVF(float *mx, float *my, float *mz,
                                  cudaTextureObject_t alpha_x,
                                  cudaTextureObject_t alpha_y,
                                  cudaTextureObject_t alpha_z,
                                  cudaTextureObject_t beta_x,
                                  cudaTextureObject_t beta_y,
                                  cudaTextureObject_t beta_z,
                                  float volume, float flow,
                                  int nx, int ny, int nz)
{
    int ix = 16 * blockIdx.x + threadIdx.x;
    int iy = 16 * blockIdx.y + threadIdx.y;
    int iz = 4 * blockIdx.z + threadIdx.z;
    if (ix >= nx || iy >= ny || iz >= nz)
        return;
    int id = ix + iy * nx + iz * nx * ny;
    mx[id] = tex3D<float>(alpha_x, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * volume +
             tex3D<float>(beta_x, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * flow;
    my[id] = tex3D<float>(alpha_y, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * volume +
             tex3D<float>(beta_y, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * flow;
    mz[id] = tex3D<float>(alpha_z, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * volume +
             tex3D<float>(beta_z, (ix + 0.5f), (iy + 0.5f), (iz + 0.5f)) * flow;
}
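This kernel samples six 3D textures at texel centers (the +0.5f offsets) and blends the alpha (volume-weighted) and beta (flow-weighted) displacement fields. The host-side texture setup is not part of this dump; the sketch below shows one plausible way to build such a texture object in CUDA, under the assumption that each field is an nx x ny x nz float volume laid out x-fastest on the host. The helper name is hypothetical.

#include <cuda_runtime.h>

// Build a 3D float texture object over an nx x ny x nz host volume
// (illustrative helper; the project's real setup code is not shown here).
cudaTextureObject_t makeDVFTexture(const float *hostData, int nx, int ny, int nz)
{
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaExtent extent = make_cudaExtent(nx, ny, nz);
    cudaArray_t arr;
    cudaMalloc3DArray(&arr, &desc, extent);

    cudaMemcpy3DParms copy = {};
    copy.srcPtr = make_cudaPitchedPtr((void *)hostData, nx * sizeof(float), nx, ny);
    copy.dstArray = arr;
    copy.extent = extent;
    copy.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3D(&copy);

    cudaResourceDesc res = {};
    res.resType = cudaResourceTypeArray;
    res.res.array.array = arr;

    cudaTextureDesc tex = {};
    tex.addressMode[0] = tex.addressMode[1] = tex.addressMode[2] = cudaAddressModeClamp;
    tex.filterMode = cudaFilterModeLinear;  // hardware-interpolated reads
    tex.readMode = cudaReadModeElementType;
    tex.normalizedCoords = 0;               // kernel samples at (i + 0.5f) texel units

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &res, &tex, nullptr);
    return texObj;
}

Note that the launch shape must match the strides hard-coded in the kernel (ix = 16 * blockIdx.x + threadIdx.x, etc.): dim3 block(16, 16, 4); dim3 grid((nx + 15) / 16, (ny + 15) / 16, (nz + 3) / 4);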
aec53a1ddce532c1d261b3a1777283b60bfd265f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
#include "macros.hpp"
#include <ATen/ExpandUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPUtils.h>
#include <c10/core/ScalarType.h>
#include <stdio.h>
#include <torch/extension.h>

/* return the index of the current point in idxList:
   -1 outside, >= 0 inside */
template <typename indice_t>
__device__ void is_inside(const int topK, const indice_t *__restrict__ idxList,
                          const indice_t curr_Idx, int *curK) {
  for (size_t i = 0; i < topK; i++) {
    // a pixel is inside the splat if idxList contains the point index
    if (idxList[i] == curr_Idx) {
      *curK = i;
      return;
    }
    // a pixel definitely isn't inside a splat if it's not occupied by any point
    if (idxList[i] == -1) {
      *curK = -1;
      return;
    }
  }
  *curK = -1;
}

/* compute pixel color after removing a point from a merged pixel */
// TODO: curPointList is probably not necessary, since rhoList and wsList will
// be zero wherever curPointList[k] == -1
template <typename scalar_t, typename indice_t>
__device__ void after_removal(const int numColors, const int topK, const int curK,
                              const scalar_t depthThres,
                              const scalar_t *depthList,
                              const indice_t *curPointList, // topK
                              const uint8_t *curIsBehind,   // topK
                              const scalar_t *wsList,       // topKx3
                              const scalar_t *rhoList,      // topKx1
                              const scalar_t *curPixel,     // numColors
                              scalar_t *newColors,          // numColors
                              scalar_t *newDepth) {
  // initialize color with 0.0
  for (size_t c = 0; c < numColors; c++) {
    newColors[c] = 0.0;
  }
  // initialize depth with the farthest so far
  *newDepth = depthList[topK - 1];
  scalar_t sumRho = 0.0;
  int numVisible = 0;
  for (size_t k = 0; k < topK; k++) {
    if (curIsBehind[k] == 0) ++numVisible;
  }
  // if it's the only visible point, then removing it will reveal the color below
  assert(numVisible >= 0);
  if (numVisible == 1) {
    sumRho = 0.0;
    // CHECK: should this be the second-closest depth?
    scalar_t curDepth = depthList[1];
    {
      size_t k = curK + 1;
      while (k < topK) {
        // as soon as idxList is -1 or depth > currentDepth + threshold,
        // stop accumulating colors
        if (curPointList[k] == -1) { break; }
        if ((depthList[k] - curDepth) > depthThres) { break; }
        for (size_t c = 0; c < numColors; c++) {
          newColors[c] += wsList[k * numColors + c] * rhoList[k];
        }
        sumRho += rhoList[k];
        if (depthList[k] < *newDepth) { *newDepth = depthList[k]; }
        ++k;
      }
    }
    for (size_t c = 0; c < numColors; c++) {
      newColors[c] /= (sumRho + 1e-8);
    }
    return;
  }
  // not the only visible point:
  // removing the current point involves reweighting the rhos
  for (size_t k = 0; k < numVisible; k++) {
    if (k == curK) { continue; }
    for (size_t c = 0; c < numColors; c++) {
      newColors[c] += wsList[k * numColors + c] * rhoList[k];
    }
    sumRho += rhoList[k];
    if (depthList[k] < *newDepth) { *newDepth = depthList[k]; }
  }
  for (size_t c = 0; c < numColors; c++) {
    newColors[c] /= (sumRho + 1e-8);
  }
  assert(sumRho > 0);
}

/* compute pixel color after moving a point to a merged pixel */
template <typename scalar_t>
__device__ void after_addition(const int numColors, const int topK,
                               const scalar_t rho, const scalar_t *ws,
                               const scalar_t pointDepth, const scalar_t depthThres,
                               const scalar_t *depthList,
                               const uint8_t *curIsBehind, // topK
                               const scalar_t *wsList,     // topKx3
                               const scalar_t *rhoList,    // topKx1
                               const scalar_t *curPixel,   // numColors
                               scalar_t *newColors,        // numColors
                               scalar_t *newDepth) {
  scalar_t sumRho = rho;
  for (size_t k = 0; k < topK; k++) {
    if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { break; }
    sumRho += rhoList[k];
  }
  if (sumRho == 0) { sumRho += 1e-5; }
  for (size_t c = 0; c < numColors; c++) {
    newColors[c] = rho / sumRho * ws[c];
  }
  for (size_t k = 0; k < topK; k++) {
    for (size_t c = 0; c < numColors; c++) {
      if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { break; }
      newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c];
    }
  }
  *newDepth = min(depthList[0], pointDepth);
}

/* compute pixel color after moving a point closer to the screen */
template <typename scalar_t>
__device__ void after_drawing_closer(const int numColors, const int topK,
                                     const int curK,
                                     const scalar_t *wsList,    // topKx3
                                     const scalar_t *rhoList,   // topKx1
                                     const scalar_t *depthList, // topK
                                     const uint8_t *isBehind,   // topK
                                     scalar_t *newColors, scalar_t *newDepth) {
  scalar_t curRho = rhoList[curK];
  const scalar_t *curW = wsList + curK * numColors;
  scalar_t pointDepth = depthList[curK];
  scalar_t sumRho = curRho;
  for (size_t k = 0; k < topK; k++) {
    if (isBehind[k] > 0) { break; }
    sumRho += rhoList[k];
  }
  // should at least contain curRho
  assert(sumRho > 0);
  for (size_t c = 0; c < numColors; c++) {
    newColors[c] = curRho / sumRho * curW[c];
  }
  for (size_t k = 0; k < topK; k++) {
    for (size_t c = 0; c < numColors; c++) {
      if (isBehind[k] > 0) { break; }
      newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c];
    }
  }
  *newDepth = min(depthList[0], pointDepth);
}

// push |v| away from zero so that subsequent divisions cannot blow up
template <typename scalar_t>
__device__ scalar_t eps_guard(scalar_t v) {
  const scalar_t eps = 0.01;
  if (v < 0) { return v - eps; }
  return v + eps;
}

/* a point is not "bad", i.e. does not need to be moved, when its colorGrads
   are zero within its effective extent (pointIdxMap includes the point ID
   && rhoMap > 0) */
template <typename scalar_t, typename indice_t>
__global__ void whitelist_points(
    const int imgHeight, const int imgWidth, const int topK, const int PN,
    const int batchSize, const int WDim,
    const scalar_t *__restrict__ colorGrads,    // BxHxWx3 gradient from output
    const indice_t *__restrict__ pointIdxMap,   // BxHxWxtopK
    const uint8_t *__restrict__ isBehind,       // BxHxWxtopK
    const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax
    uint8_t *whitelist_mask                     // BxNx1
) {
  const int numPixels = imgHeight * imgWidth;
  const scalar_t eps = 1e-9;
  // loop over all points (grid-stride over both batches and points)
  for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
    for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
         p += blockDim.x * gridDim.y) {
      const size_t curPointIdx = b * PN + p;
      const indice_t *curBB = boundingBoxes + curPointIdx * 4;
      const indice_t xmin = curBB[0];
      const indice_t ymin = curBB[1];
      const indice_t xmax = curBB[2];
      const indice_t ymax = curBB[3];
      // search within the bounding box
      bool isGood = true;
      bool inExtent = false;
      for (size_t h = ymin; h < ymax; h++) {
        for (size_t w = xmin; w < xmax; w++) {
          const indice_t curPixelIdx = b * numPixels + h * imgWidth + w;
          scalar_t colorGrad = 0.0;
          for (size_t c = 0; c < WDim; c++) {
            colorGrad += abs(colorGrads[curPixelIdx * WDim + c]);
          }
          // temporary flags for the current pixel
          bool _isGood = true;
          bool _inExtent = false;
          for (size_t k = 0; k < topK; k++) {
            // inside the extent and is shown
            if (pointIdxMap[curPixelIdx * topK + k] == p) {
              _inExtent = true;
              // is bad if some pixel inside the splat radius is not shown
              // (isBehind) or colorGrad > threshold
              _isGood = !(isBehind[curPixelIdx * topK + k] > 0 || (colorGrad) > eps);
            }
          }
          // at least one pixel lies in the extent
          inExtent = inExtent | _inExtent;
          // as long as one pixel is not good, this point is not good
          isGood = _isGood & isGood;
        }
      }
      // a point with no pixel in its extent is also marked bad
      whitelist_mask[curPointIdx] = inExtent & isGood;
    }
  }
}

template <typename scalar_t, typename indice_t>
__global__ void visibility_backward_kernel(
    const int batchSize, const int imgHeight, const int imgWidth,
    const int localHeight, const int localWidth, const int topK, const int PN,
    const int projDim, const int WDim, const scalar_t focalL,
    const scalar_t mergeT, const bool considerZ,
    const scalar_t *__restrict__ colorGrads,    // BxHxWx3 gradient from output
    const indice_t *__restrict__ pointIdxMap,   // BxHxWxtopK
    const scalar_t *__restrict__ rhoMap,        // BxHxWxtopK
    const scalar_t *__restrict__ wsMap,         // BxHxWxtopKx3
    const scalar_t *__restrict__ depthMap,      // BxHxWxtopK
    const uint8_t *__restrict__ isBehind,       // BxHxWxtopK
    const scalar_t *__restrict__ pixelValues,   // BxHxWx3
    const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax
    const scalar_t *__restrict__ projPoints,    // BxNx[2or3], xy1
    const scalar_t *__restrict__ pointColors,   // BxNx3
    const scalar_t *__restrict__ depthValues,   // BxNx1
    const scalar_t *__restrict__ rhoValues,     // BxNx1
    scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY
    scalar_t *__restrict__ dIdz) // BxNx1 gradients for z
{
  const int numPixels = imgHeight * imgWidth;
  // loop over all points
  for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
    for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
         p += blockDim.x * gridDim.y) {
      const indice_t curPointIdx = b * PN + p;
      // skip point (gradient = 0) if mask == 1 (i.e. the point is good)
      scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]);
      scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]);
      // scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]);
      // scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]);
      const scalar_t *curPointColor = pointColors + curPointIdx * WDim;
      const scalar_t *curProjValues = projPoints + curPointIdx * projDim;
      scalar_t *dIdx = dIdp + curPointIdx * projDim;
      scalar_t *dIdy = dIdp + curPointIdx * projDim + 1;
      scalar_t *curdIdz = dIdz + curPointIdx;
      const scalar_t rhov = rhoValues[curPointIdx];
      const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight);
      const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0);
      const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth);
      const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0);
      // loop over all pixels of the local window
      for (size_t i = bH; i < eH; i++) {
        for (size_t j = bW; j < eW; j++) {
          const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j);
          const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim;
          const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim;
          const scalar_t *curRhos = rhoMap + curPixelIdx * topK;
          const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK;
          const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim;
          const scalar_t *curDepthList = depthMap + curPixelIdx * topK;
          const uint8_t *curIsBehind = isBehind + curPixelIdx * topK;
          const scalar_t curPointDepth = depthValues[curPointIdx];
          // is this pixel inside the splat?
          int curK;
          is_inside(topK, curIdxList, curPointIdx, &curK);
          scalar_t didxv = 0.0;
          scalar_t didyv = 0.0;
          scalar_t didzv = 0.0;
          scalar_t dldI = 0.0;
          scalar_t newColors[10];
          scalar_t newDepth;
          if (curK < 0) { // pixel outside the splat
            after_addition(WDim, topK, rhov, curPointColor, curPointDepth,
                           mergeT, curDepthList, curIsBehind, curWs, curRhos,
                           curPixelValues, newColors, &newDepth);
            for (size_t c = 0; c < WDim; c++) {
              dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
            }
            if (dldI < 0.0) {
              // another point at pixel (i, j) is in front of the current point
              // by a threshold; we need to change z, otherwise moving in that
              // direction won't change the color value
              if (curPointDepth - newDepth > mergeT) {
                if (!considerZ) { continue; }
                scalar_t dx = (scalar_t(j) - curProjValues[0]);
                scalar_t dy = (scalar_t(i) - curProjValues[1]);
                scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL / imgWidth * 2 * curPointDepth;
                scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL / imgHeight * 2 * curPointDepth;
                assert(newDepth < curPointDepth);
                scalar_t dz_3d = newDepth - curPointDepth;
                scalar_t distance2_3d = eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d);
                scalar_t distance2 = eps_guard(dx * dx + dy * dy);
                didzv = dldI / distance2_3d * dz_3d;
                // should rescale to screen space
                didxv = dldI / distance2 * dx;
                didyv = dldI / distance2 * dy;
                assert(!isnan(didxv));
                assert(!isnan(didyv));
              } else { // no need to change z
                scalar_t dx = (scalar_t(j) - curProjValues[0]);
                scalar_t dy = (scalar_t(i) - curProjValues[1]);
                scalar_t distance2 = eps_guard(dx * dx + dy * dy);
                didxv = dldI / distance2 * dx; // dIdx
                didyv = dldI / distance2 * dy; // dIdy
                assert(!isnan(didxv));
                assert(!isnan(didyv));
              }
            }
          } else { // pixel inside the splat
            // is the current point shown?
            if (curIsBehind[curK] < 1) {
              // dIdx, dIdy and dIdz-
              after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList,
                            curIsBehind, curWs, curRhos, curPixelValues,
                            newColors, &newDepth);
              for (size_t c = 0; c < WDim; c++) {
                dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
              }
              if (dldI < 0) {
                // dIdp = (dIdp+) + (dIdp-)
                scalar_t dx = (scalar_t(j) - curProjValues[0]);
                scalar_t dy = (scalar_t(i) - curProjValues[1]);
                scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy));
                scalar_t rx = curProjValues[0] - xmin;
                scalar_t ry = curProjValues[1] - ymin;
                assert(rx > 0);
                assert(ry > 0);
                scalar_t r = max(rx, ry);
                didxv = dldI * dx / eps_guard((r + distance) * distance) +
                        dldI * dx / eps_guard((distance - r) * distance);
                didyv = dldI * dy / eps_guard((r + distance) * distance) +
                        dldI * dy / eps_guard((distance - r) * distance);
                assert(!isnan(didxv));
                assert(!isnan(didyv));
              }
            } // endif (curIsBehind[curK] < 1)
            else { // point is not visible
              if (!considerZ) continue;
              // this point is occluded by other points; moving it closer will
              // change the color
              after_drawing_closer(WDim, topK, curK, curWs, curRhos,
                                   curDepthList, curIsBehind, newColors, &newDepth);
              for (size_t c = 0; c < WDim; c++) {
                dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
              }
              if (dldI < 0.0) {
                didzv = dldI / eps_guard(newDepth - curPointDepth);
              }
            } // endif on top
          } // endif inside
          (*curdIdz) += didzv;
          (*dIdx) += didxv;
          (*dIdy) += didyv;
        } // imWidth
      } // imHeight
    } // point
  } // batch
}

// dIdp BxNx2 (dx, dy), dIdz BxNx1
std::vector<at::Tensor> visibility_backward_cuda(
    const double focalLength, const double mergeThres, const bool considerZ,
    const int localHeight, const int localWidth,
    const at::Tensor &colorGrads,    // BxHxWxWDim
    const at::Tensor &pointIdxMap,   // BxHxWxtopK
    const at::Tensor &rhoMap,        // BxHxWxtopK
    const at::Tensor &wsMap,         // BxHxWxtopKxWDim
    const at::Tensor &depthMap,      // BxHxWxtopK
    const at::Tensor &isBehind,      // BxHxWxtopK
    const at::Tensor &pixelValues,   // BxHxWxWDim
    const at::Tensor &boundingBoxes, // BxNx4
    const at::Tensor &projPoints,    // BxNx[2or3]
    const at::Tensor &pointColors,   // BxNxWDim
    const at::Tensor &depthValues,   // BxNx1
    const at::Tensor &rhoValues,     // BxNx1
    at::Tensor &dIdp, at::Tensor &dIdz) {
  const int batchSize = pointIdxMap.size(0);
  const int imgHeight = pointIdxMap.size(1);
  const int imgWidth = pointIdxMap.size(2);
  const int topK = pointIdxMap.size(3);
  const int PN = projPoints.size(1);
  const int WDim = pointColors.size(2);
  CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3);
  const int projDim = projPoints.size(2);
  CHECK_EQ(pointColors.size(1), PN);
  CHECK(colorGrads.size(-1) == wsMap.size(-1) &&
        wsMap.size(-1) == pixelValues.size(-1) &&
        pixelValues.size(-1) == pointColors.size(-1));
  unsigned int n_threads, n_blocks;
  n_threads = opt_n_threads(PN);
  n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads);
  // initialize with zeros
  dIdp.zero_();
  dIdz.zero_();
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  std::vector<at::Tensor> output;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      colorGrads.type(), "visibility_backward_kernel", ([&] {
        hipLaunchKernelGGL((visibility_backward_kernel<scalar_t, int64_t>),
            dim3(batchSize, n_blocks, 1), dim3(n_threads), 0, stream,
            batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN,
            projDim, WDim, focalLength, mergeThres, considerZ,
            colorGrads.data<scalar_t>(),  // BxHxWx3
            pointIdxMap.data<int64_t>(),  // BxHxWxtopK
            rhoMap.data<scalar_t>(),      // BxHxWxtopK
            wsMap.data<scalar_t>(),       // BxHxWxtopKx3
            depthMap.data<scalar_t>(),    // BxHxWxtopK
            isBehind.data<uint8_t>(),     // BxHxWxtopK
            pixelValues.data<scalar_t>(), // BxHxWx3
            boundingBoxes.toType(pointIdxMap.scalar_type()).data<int64_t>(), // BxNx4
            projPoints.data<scalar_t>(),  // BxNx[2or3], xy1
            pointColors.data<scalar_t>(), // BxNx3
            depthValues.data<scalar_t>(), // BxNx1
            rhoValues.data<scalar_t>(),   // BxNx1
            dIdp.data<scalar_t>(),        // BxNx2 gradients for projX, projY
            dIdz.data<scalar_t>()         // BxNx1
        );
      }));
  output.push_back(dIdp);
  output.push_back(dIdz);
  hipError_t err = hipDeviceSynchronize();
  if (err != hipSuccess) {
    printf("visibility_backward_cuda kernel failed: %s\n", hipGetErrorString(err));
    exit(-1);
  }
  return output;
}
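Both kernels in this file use the same 2D grid-stride pattern: blockIdx.x walks the batch dimension, (blockIdx.y, threadIdx.x) walk the point dimension, and each index advances by the full grid extent, so any launch shape eventually covers all B x PN items. A minimal standalone sketch of the same idiom (the kernel name and the per-point work are placeholders):

__global__ void per_point_op(const float *in, float *out, int batchSize, int PN)
{
    // outer stride over batches, inner stride over points; no assumption
    // that gridDim covers the problem size in one pass
    for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
        for (int p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
             p += blockDim.x * gridDim.y) {
            out[b * PN + p] = 2.f * in[b * PN + p]; // placeholder per-point work
        }
    }
}

This is why the host side below can clamp n_blocks to 32 without losing coverage: the strided loops pick up whatever the grid does not reach directly.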
aec53a1ddce532c1d261b3a1777283b60bfd265f.cu
#include "cuda_utils.h" #include "macros.hpp" #include <ATen/ExpandUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAUtils.h> #include <c10/core/ScalarType.h> #include <stdio.h> #include <torch/extension.h> /* return the indice of current point in the idxList -1 outside >= 0 inside */ template <typename indice_t> __device__ void is_inside(const int topK, const indice_t *__restrict__ idxList, const indice_t curr_Idx, int *curK) { for (size_t i = 0; i < topK; i++) { // a pixel is inside the splat if idxList contains point index if (idxList[i] == curr_Idx) { *curK = i; return; } // a pixel definitely isn't inside a splat if it's not occupied by any point if (idxList[i] == -1) { *curK = -1; return; } } *curK = -1; return; } /* compute pixel color after removing a point from a merged pixel */ // TODO curPointList probably no necessary, since rhoList and WsList will be // zero at curPointList[k] == -1 template <typename scalar_t, typename indice_t> __device__ void after_removal(const int numColors, const int topK, const int curK, const scalar_t depthThres, const scalar_t *depthList, const indice_t *curPointList, // topK const uint8_t *curIsBehind, // topK const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *curPixel, // numColors scalar_t *newColors, // numColors scalar_t *newDepth) { // initialize color with 0.0 for (size_t c = 0; c < numColors; c++) { newColors[c] = 0.0; } // initialize depth with the farthest so far *newDepth = depthList[topK - 1]; scalar_t sumRho = 0.0; int numVisible = 0; for (size_t k = 0; k < topK; k++) { if (curIsBehind[k] == 0) ++numVisible; } // if it's the only visible point, then removing it will reveal the // color below assert(numVisible >= 0); if (numVisible == 1) { sumRho = 0.0; // CHECK: should be the second? 
scalar_t curDepth = depthList[1]; { size_t k = curK + 1; while (k < topK) { // as soon as idxList is -1 or depth > currentDepth+threshold // stop accumulating colors if (curPointList[k] == -1) { break; } if ((depthList[k] - curDepth) > depthThres) { break; } for (size_t c = 0; c < numColors; c++) { newColors[c] += wsList[k * numColors + c] * rhoList[k]; } sumRho += rhoList[k]; if (depthList[k] < *newDepth) { *newDepth = depthList[k]; } ++k; } } for (size_t c = 0; c < numColors; c++) { newColors[c] /= (sumRho + 1e-8); } return; } // not the only point visible: // removing current point involves reweighting rhos for (size_t k = 0; k < numVisible; k++) { if (k == curK) { continue; } for (size_t c = 0; c < numColors; c++) { newColors[c] += wsList[k * numColors + c] * rhoList[k]; } sumRho += rhoList[k]; if (depthList[k] < *newDepth) { *newDepth = depthList[k]; } } for (size_t c = 0; c < numColors; c++) { newColors[c] /= (sumRho + 1e-8); } assert(sumRho > 0); return; } /* compute pixel color after moving a point to a merged pixel */ template <typename scalar_t> __device__ void after_addition(const int numColors, const int topK, const scalar_t rho, const scalar_t *ws, const scalar_t pointDepth, const scalar_t depthThres, const scalar_t *depthList, const uint8_t *curIsBehind, // topK const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *curPixel, // numColors scalar_t *newColors, // numColors scalar_t *newDepth) { scalar_t sumRho = rho; for (size_t k = 0; k < topK; k++) { if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth break; } sumRho += rhoList[k]; } if (sumRho == 0) { sumRho += 1e-5; } for (size_t c = 0; c < numColors; c++) { newColors[c] = rho / sumRho * ws[c]; } for (size_t k = 0; k < topK; k++) { for (size_t c = 0; c < numColors; c++) { if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth break; } newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c]; } } *newDepth = min(depthList[0], pointDepth); } /* compute pixel color after moving a point closer to the screen */ template <typename scalar_t> __device__ void after_drawing_closer(const int numColors, const int topK, const int curK, const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *depthList, // topK const uint8_t *isBehind, // topK scalar_t *newColors, scalar_t *newDepth) { scalar_t curRho = rhoList[curK]; const scalar_t *curW = wsList + curK * numColors; scalar_t pointDepth = depthList[curK]; scalar_t sumRho = curRho; for (size_t k = 0; k < topK; k++) { if (isBehind[k] > 0) { break; } sumRho += rhoList[k]; } // should at least have curRho assert(sumRho > 0); for (size_t c = 0; c < numColors; c++) { newColors[c] = curRho / sumRho * curW[c]; } for (size_t k = 0; k < topK; k++) { for (size_t c = 0; c < numColors; c++) { if (isBehind[k] > 0) { break; } newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c]; } } *newDepth = min(depthList[0], pointDepth); } template <typename scalar_t> __device__ scalar_t eps_guard(scalar_t v) { const scalar_t eps = 0.01; if (v < 0) { return v - eps; } if (v >= 0) { return v + eps; } // return v; } /* a point is not "bad", i.e. 
don't need to be moved, when it's colorGrads is zero within its effective extent (pointIdxMap include pointID && rhoMap > 0) */ template <typename scalar_t, typename indice_t> __global__ void whitelist_points( const int imgHeight, const int imgWidth, const int topK, const int PN, const int batchSize, const int WDim, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax uint8_t *whitelist_mask // BxNx1 ) { const int numPixels = imgHeight * imgWidth; const scalar_t eps = 1e-9; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const size_t curPointIdx = b * PN + p; const indice_t *curBB = boundingBoxes + curPointIdx * 4; const indice_t xmin = curBB[0]; const indice_t ymin = curBB[1]; const indice_t xmax = curBB[2]; const indice_t ymax = curBB[3]; // search within the bounding box bool isGood = true; bool inExtent = false; for (size_t h = ymin; h < ymax; h++) { for (size_t w = xmin; w < xmax; w++) { const indice_t curPixelIdx = b * numPixels + h * imgWidth + w; scalar_t colorGrad = 0.0; for (size_t c = 0; c < WDim; c++) { colorGrad += abs(colorGrads[curPixelIdx * WDim + c]); } // temporary flag for current pixel bool _isGood = true; bool _inExtent = false; for (size_t k = 0; k < topK; k++) { // inside the extent and is shown if (pointIdxMap[curPixelIdx * topK + k] == p) { _inExtent = true; // is bad if some pixel inside the splat radius is not shown // (isBehind) or colorGrad > threshold _isGood = !(isBehind[curPixelIdx * topK + k] > 0 || (colorGrad) > eps); } } // there is one pixel in extent inExtent = inExtent | _inExtent; // as long as one pixel is not good, this point is not good isGood = _isGood & isGood; } } // if all pixels are not in extent, then this point is bad whitelist_mask[curPointIdx] = inExtent & isGood; } } } /* */ template <typename scalar_t, typename indice_t> __global__ void visibility_backward_kernel( const int batchSize, const int imgHeight, const int imgWidth, const int localHeight, const int localWidth, const int topK, const int PN, const int projDim, const int WDim, const scalar_t focalL, const scalar_t mergeT, const bool considerZ, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const scalar_t *__restrict__ rhoMap, // BxHxWxtopK const scalar_t *__restrict__ wsMap, // BxHxWxtopKx3 const scalar_t *__restrict__ depthMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const scalar_t *__restrict__ pixelValues, // BxHxWx3 const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1 const scalar_t *__restrict__ pointColors, // BxNx3 const scalar_t *__restrict__ depthValues, // BxNx1 const scalar_t *__restrict__ rhoValues, // BxNx1 scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY scalar_t *__restrict__ dIdz) // BxNx1 gradients for z { // const scalar_t mergeT = scalar_t(mergeThres); // const scalar_t focalL = scalar_t(focalLength); const int numPixels = imgHeight * imgWidth; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const indice_t 
curPointIdx = b * PN + p; // skip point (gradient=0) if mask == 1 (i.e. point is good) scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]); scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]); // scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]); // scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]); const scalar_t *curPointColor = pointColors + curPointIdx * WDim; const scalar_t *curProjValues = projPoints + curPointIdx * projDim; scalar_t *dIdx = dIdp + curPointIdx * projDim; scalar_t *dIdy = dIdp + curPointIdx * projDim + 1; scalar_t *curdIdz = dIdz + curPointIdx; const scalar_t rhov = rhoValues[curPointIdx]; const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight); const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0); const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth); const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0); // loop all pixels for (size_t i = bH; i < eH; i++) { for (size_t j = bW; j < eW; j++) { const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j); const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim; const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim; const scalar_t *curRhos = rhoMap + curPixelIdx * topK; // const indice_t curClosest = pointIdxMap[curPixelIdx * topK]; // const indice_t curClosestIdx = b * PN + curClosest; const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK; const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim; const scalar_t *curDepthList = depthMap + curPixelIdx * topK; // const scalar_t curClosestDepth = depthMap[curPixelIdx * topK]; const uint8_t *curIsBehind = isBehind + curPixelIdx * topK; const scalar_t curPointDepth = depthValues[curPointIdx]; // is this pixel inside the splat? 
int curK; is_inside(topK, curIdxList, curPointIdx, &curK); scalar_t didxv = 0.0; scalar_t didyv = 0.0; scalar_t didzv = 0.0; scalar_t dldI = 0.0; scalar_t newColors[10]; scalar_t newDepth; // outside if (curK < 0) { after_addition(WDim, topK, rhov, curPointColor, curPointDepth, mergeT, curDepthList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0.0) { // another point at pixel i,j is in front of the current point by // a threshold, need to change z, otherwise moving to that // direction won't change the color value if (curPointDepth - newDepth > mergeT) { if (!considerZ) { continue; } scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL / imgWidth * 2 * curPointDepth; scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL / imgHeight * 2 * curPointDepth; assert(newDepth < curPointDepth); scalar_t dz_3d = newDepth - curPointDepth; scalar_t distance2_3d = eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d); scalar_t distance2 = eps_guard(dx * dx + dy * dy); didzv = dldI / distance2_3d * dz_3d; // should rescale to screen space didxv = dldI / distance2 * dx; didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); } // don't need to change z else { scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance2 = eps_guard(dx * dx + dy * dy); // dIdx didxv = dldI / distance2 * dx; // dIdy didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); } } } // pixel inside splat else { // is the current point shown? if (curIsBehind[curK] < 1) { // dIdx dIdy and dIdz- after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0) { // dIdp = (dIdp+) + (dIdp-) scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy)); scalar_t rx = curProjValues[0] - xmin; scalar_t ry = curProjValues[1] - ymin; assert(rx > 0); assert(ry > 0); scalar_t r = max(rx, ry); didxv = dldI * dx / eps_guard((r + distance) * distance) + dldI * dx / eps_guard((distance - r) * distance); didyv = dldI * dy / eps_guard((r + distance) * distance) + dldI * dy / eps_guard((distance - r) * distance); assert(!isnan(didxv)); assert(!isnan(didyv)); } } // endif (curRhos[curK] > 0) // point is not visible: else { if (!considerZ) continue; // this point is occluded by other points, moving closer will // change the color after_drawing_closer(WDim, topK, curK, curWs, curRhos, curDepthList, curIsBehind, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0.0) { didzv = dldI / eps_guard(newDepth - curPointDepth); } } // endif on top } // endif inside (*curdIdz) += didzv; (*dIdx) += didxv; (*dIdy) += didyv; } // imWidth } // imHeight } // point } // batch } // dIdp BxNx2 dx dy, dIdz BxNx1 std::vector<at::Tensor> visibility_backward_cuda(const double focalLength, const double mergeThres, const bool considerZ, const int localHeight, const int localWidth, const at::Tensor &colorGrads, // BxHxWxWDim const at::Tensor &pointIdxMap, // BxHxWxtopK const at::Tensor &rhoMap, 
// BxHxWxtopK const at::Tensor &wsMap, // BxHxWxtopKxWDim const at::Tensor &depthMap, // BxHxWxtopK const at::Tensor &isBehind, // BxHxWxtopK const at::Tensor &pixelValues, // BxHxWxWDim const at::Tensor &boundingBoxes, // BxNx4 const at::Tensor &projPoints, // BxNx[2or3] const at::Tensor &pointColors, // BxNxWDim const at::Tensor &depthValues, // BxNx1 const at::Tensor &rhoValues, // BxNx1 at::Tensor &dIdp, at::Tensor &dIdz) { const int batchSize = pointIdxMap.size(0); const int imgHeight = pointIdxMap.size(1); const int imgWidth = pointIdxMap.size(2); const int topK = pointIdxMap.size(3); const int PN = projPoints.size(1); const int WDim = pointColors.size(2); CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3); const int projDim = projPoints.size(2); CHECK_EQ(pointColors.size(1), PN); CHECK(colorGrads.size(-1) == wsMap.size(-1) && wsMap.size(-1) == pixelValues.size(-1) && pixelValues.size(-1) == pointColors.size(-1)); std::vector<at::Tensor> outputs; unsigned int n_threads, n_blocks; n_threads = opt_n_threads(PN); n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads); // initialize with zeros dIdp.zero_(); dIdz.zero_(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); std::vector<at::Tensor> output; AT_DISPATCH_FLOATING_TYPES_AND_HALF( colorGrads.type(), "visibility_backward_kernel", ([&] { visibility_backward_kernel<scalar_t, int64_t> <<<dim3(batchSize, n_blocks, 1), n_threads, 0, stream>>>( batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN, projDim, WDim, focalLength, mergeThres, considerZ, colorGrads.data<scalar_t>(), // BxHxWx3 pointIdxMap.data<int64_t>(), // BxHxWxtopK rhoMap.data<scalar_t>(), // BxHxWxtopK wsMap.data<scalar_t>(), // BxHxWxtopKx3 depthMap.data<scalar_t>(), // BxHxWxtopK isBehind.data<uint8_t>(), // BxHxWxtopK pixelValues.data<scalar_t>(), // BxHxWx3 boundingBoxes.toType(pointIdxMap.scalar_type()) .data<int64_t>(), // BxNx4 xmin ymin xmax ymax projPoints.data<scalar_t>(), // BxNx[2or3], xy1 pointColors.data<scalar_t>(), // BxNx3 depthValues.data<scalar_t>(), // BxNx1 rhoValues.data<scalar_t>(), // BxNx1 dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y dIdz.data<scalar_t>() // BxNx1 ); // BxHxWx8 })); output.push_back(dIdp); output.push_back(dIdz); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { printf("compute_visiblity_maps_cuda kernel failed: %s\n", cudaGetErrorString(err)); exit(-1); } return output; }
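Two maintenance notes on the host launcher. First, the device code accumulates into a fixed scalar_t newColors[10] buffer, but the host never checks that WDim fits it. Second, tensor.data<T>() and Tensor::type() are the older ATen spellings, deprecated in recent PyTorch in favor of data_ptr<T>() and scalar_type(). A drop-in fragment under those assumptions (illustrative, not part of the original file):

// inside visibility_backward_cuda, before the dispatch:
TORCH_CHECK(WDim <= 10,
            "WDim must be <= 10 to fit the kernel's fixed newColors[10] buffer");

// modern spelling of the dispatch and pointer access used above:
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    colorGrads.scalar_type(), "visibility_backward_kernel", ([&] {
      const scalar_t *colorGradsPtr = colorGrads.data_ptr<scalar_t>();
      // ... the remaining .data<T>() calls become .data_ptr<T>() likewise ...
    }));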
db197062cd4949f9ae6132aaa476819764131f5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #define BLOCK_SIZE 16 /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: gpu_square_matrix_mult description: dot product of two matrix (not only square) in GPU parameters: &a GPU device pointer to a n X n matrix (A) &b GPU device pointer to a n X n matrix (B) &c GPU device output purpose pointer to a n X n matrix (C) to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) { __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int tmp = 0; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0; } else { tile_a[threadIdx.y][threadIdx.x] = d_a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0; } else { tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { d_result[row * n + col] = tmp; } } /* ********************************************************************* function name: gpu_matrix_transpose description: matrix transpose parameters: &mat_in GPU device pointer to a rows X cols matrix &mat_out GPU device output purpose pointer to a cols X rows matrix to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) { unsigned int idx = blockIdx.x * blockDim.x + 
threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < cols && idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } } /* ********************************************************************* function name: cpu_matrix_mult description: product of two matrices (not only square) on the CPU, for validating GPU results parameters: &a CPU host pointer to a m X n matrix (A) &b CPU host pointer to a n X k matrix (B) &c CPU host output pointer to a m X k matrix (C) to store the result return: none ********************************************************************* */ void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) { for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { int tmp = 0; for (int h = 0; h < n; ++h) { tmp += h_a[i * n + h] * h_b[h * k + j]; } h_result[i * k + j] = tmp; } } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m, n, k; /* Fixed seed for illustration */ srand(3333); printf("please type in m n and k\n"); scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c, *h_cc; hipHostMalloc((void **) &h_a, sizeof(int)*m*n); hipHostMalloc((void **) &h_b, sizeof(int)*n*k); hipHostMalloc((void **) &h_c, sizeof(int)*m*k); hipHostMalloc((void **) &h_cc, sizeof(int)*m*k); // random initialize matrix A for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = rand() % 1024; } } // random initialize matrix B for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = rand() % 1024; } } float gpu_elapsed_time_ms, cpu_elapsed_time_ms; // some events to count the execution time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // start to count execution time of GPU version hipEventRecord(start, 0); // Allocate memory space on the device int *d_a, *d_b, *d_c; hipMalloc((void **) &d_a, sizeof(int)*m*n); hipMalloc((void **) &d_b, sizeof(int)*n*k); hipMalloc((void **) &d_c, sizeof(int)*m*k); // copy matrix A and B from host to device memory hipMemcpy(d_a, h_a, sizeof(int)*m*n, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, sizeof(int)*n*k, hipMemcpyHostToDevice); // Launch kernel with the grid/block configuration documented in the kernel comments dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); if(m == n && n == k) { hipLaunchKernelGGL(( gpu_square_matrix_mult), dimGrid, dimBlock, 0, 0, d_a, d_b, d_c, n); } else { hipLaunchKernelGGL(( gpu_matrix_mult), dimGrid, dimBlock, 0, 0, d_a, d_b, d_c, m, n, k); } // Transfer results from device to host hipMemcpy(h_c, d_c, sizeof(int)*m*k, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // time counting terminate hipEventRecord(stop, 0); hipEventSynchronize(stop); // compute time elapsed on GPU computing hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . 
%dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms); // start the CPU version hipEventRecord(start, 0); cpu_matrix_mult(h_a, h_b, h_cc, m, n, k); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms); // validate results computed by GPU int all_ok = 1; for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]); if(h_cc[i*k + j] != h_c[i*k + j]) { all_ok = 0; } } //printf("\n"); } // roughly compute speedup if(all_ok) { printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms); } else { printf("incorrect results\n"); } // free memory hipFree(d_a); hipFree(d_b); hipFree(d_c); hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); hipHostFree(h_cc); return 0; }
db197062cd4949f9ae6132aaa476819764131f5e.cu
/* * file name: matrix.cu * * matrix.cu contains code that implements some commonly used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other projects * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #define BLOCK_SIZE 16 /* ********************************************************************* function name: gpu_matrix_mult description: product of two matrices (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further speedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: gpu_square_matrix_mult description: product of two square matrices on the GPU, using shared memory tiles parameters: &a GPU device pointer to a n X n matrix (A) &b GPU device pointer to a n X n matrix (B) &c GPU device output pointer to a n X n matrix (C) to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) { __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int tmp = 0; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not be divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0; } else { tile_a[threadIdx.y][threadIdx.x] = d_a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0; } else { tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { d_result[row * n + col] = tmp; } } /* ********************************************************************* function name: gpu_matrix_transpose description: matrix transpose parameters: &mat_in GPU device pointer to a rows X cols matrix &mat_out GPU device output pointer to a cols X rows matrix to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < cols && 
idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } } /* ********************************************************************* function name: cpu_matrix_mult description: product of two matrices (not only square) on the CPU, for validating GPU results parameters: &a CPU host pointer to a m X n matrix (A) &b CPU host pointer to a n X k matrix (B) &c CPU host output pointer to a m X k matrix (C) to store the result return: none ********************************************************************* */ void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) { for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { int tmp = 0; for (int h = 0; h < n; ++h) { tmp += h_a[i * n + h] * h_b[h * k + j]; } h_result[i * k + j] = tmp; } } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m, n, k; /* Fixed seed for illustration */ srand(3333); printf("please type in m n and k\n"); scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c, *h_cc; cudaMallocHost((void **) &h_a, sizeof(int)*m*n); cudaMallocHost((void **) &h_b, sizeof(int)*n*k); cudaMallocHost((void **) &h_c, sizeof(int)*m*k); cudaMallocHost((void **) &h_cc, sizeof(int)*m*k); // random initialize matrix A for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = rand() % 1024; } } // random initialize matrix B for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = rand() % 1024; } } float gpu_elapsed_time_ms, cpu_elapsed_time_ms; // some events to count the execution time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // start to count execution time of GPU version cudaEventRecord(start, 0); // Allocate memory space on the device int *d_a, *d_b, *d_c; cudaMalloc((void **) &d_a, sizeof(int)*m*n); cudaMalloc((void **) &d_b, sizeof(int)*n*k); cudaMalloc((void **) &d_c, sizeof(int)*m*k); // copy matrix A and B from host to device memory cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice); // Launch kernel with the grid/block configuration documented in the kernel comments dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); if(m == n && n == k) { gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n); } else { gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k); } // Transfer results from device to host cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // time counting terminate cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // compute time elapsed on GPU computing cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms); // start the CPU version cudaEventRecord(start, 0); cpu_matrix_mult(h_a, h_b, h_cc, m, n, k); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . 
%dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms); // validate results computed by GPU int all_ok = 1; for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]); if(h_cc[i*k + j] != h_c[i*k + j]) { all_ok = 0; } } //printf("\n"); } // roughly compute speedup if(all_ok) { printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms); } else { printf("incorrect results\n"); } // free memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); cudaFreeHost(h_cc); return 0; }
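The pair above illustrates the main rewrite hipify performs: CUDA's triple-chevron kernel launch becomes a hipLaunchKernelGGL call whose two extra leading 0 arguments are the dynamic shared-memory size and the stream. A minimal, self-contained sketch of that mapping follows; the scale kernel here is an illustrative stand-in, not taken from the files above, and a HIP toolchain is assumed:

#include <hip/hip_runtime.h>

__global__ void scale(int *v, int s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per element
    if (i < n) v[i] *= s;
}

int main() {
    const int n = 1024;
    int *d;
    hipMalloc((void**)&d, n * sizeof(int));
    hipMemset(d, 1, n * sizeof(int));
    dim3 grid((n + 255) / 256), block(256);
    // CUDA source form:  scale<<<grid, block, 0, 0>>>(d, 2, n);
    // hipify-emitted form: same kernel, launched through the macro,
    // with shared-memory bytes and stream passed explicitly as 0, 0
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d, 2, n);
    hipDeviceSynchronize();
    hipFree(d);
    return 0;
}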
46fa62a5c0624be6c6a58c4d68b80d4c6e3f0a78.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2016 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cudnn.h> #include <hip/hip_runtime.h> #include <stdio.h> // Reference outputs (calculated on an M40 GPU) // > ./RNN 20 2 512 64 0 // Forward: 1299 GFLOPs // Backward: 2171 GFLOPs, (1564 GFLOPs), (3549 GFLOPs) // i checksum 1.315793E+06 h checksum 1.315212E+05 // di checksum 6.676003E+01 dh checksum 6.425067E+01 // dw checksum 1.453750E+09 // // > ./RNN 20 2 512 64 1 // Forward: 1296 GFLOPs // Backward: 2235 GFLOPs, (1567 GFLOPs), (3896 GFLOPs) // i checksum 6.319591E+05 h checksum 6.319605E+04 // di checksum 4.501830E+00 dh checksum 4.489546E+00 // dw checksum 5.012598E+07 // // > ./RNN 20 2 512 64 2 // Forward: 2635 GFLOPs // Backward: 2757 GFLOPs, (2001 GFLOPs), (4433 GFLOPs) // i checksum 5.749536E+05 c checksum 4.365091E+05 h checksum 5.774818E+04 // di checksum 3.842206E+02 dc checksum 9.323785E+03 dh checksum 1.182566E+01 // dw checksum 4.313461E+08 // // > ./RNN 20 2 512 64 3 // Forward: 2428 GFLOPs // Backward: 2645 GFLOPs, (1915 GFLOPs), (4270 GFLOPs) // i checksum 6.358978E+05 h checksum 6.281680E+04 // di checksum 6.296622E+00 dh checksum 2.289960E+05 // dw checksum 5.397419E+07 // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define cudnnErrCheck(stat) { cudnnErrCheck_((stat), __FILE__, __LINE__); } void cudnnErrCheck_(cudnnStatus_t stat, const char *file, int line) { if (stat != CUDNN_STATUS_SUCCESS) { fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line); } } __global__ void initGPUData_ker(float *data, int numElements, float value) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numElements) { data[tid] = value; } } void initGPUData(float *data, int numElements, float value) { dim3 gridDim; dim3 blockDim; blockDim.x = 1024; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; hipLaunchKernelGGL(( initGPUData_ker) , dim3(gridDim), dim3(blockDim) , 0, 0, data, numElements, value); } int main(int argc, char* argv[]) { int seqLength; int numLayers; int hiddenSize; int inputSize; int miniBatch; float dropout; bool bidirectional; int mode; FILE *fp; fp = fopen("result.txt", "w"); if (argc == 7) { seqLength = atoi(argv[1]); numLayers = atoi(argv[2]); hiddenSize = atoi(argv[3]); inputSize = hiddenSize; miniBatch = atoi(argv[4]); bidirectional = 0; mode = atoi(argv[5]); dropout = atof(argv[6]); } else { printf("Usage:\n"); printf("./RNN <seqLength> <numLayers> <hiddenSize> <miniBatch> <mode> <dropout>\n"); printf("Modes: 0 = RNN_RELU, 1 = RNN_TANH, 2 = LSTM, 3 = GRU\n"); return 1; } // ------------------------- // Create cudnn context // ------------------------- cudnnHandle_t cudnnHandle; cudnnErrCheck(cudnnCreate(&cudnnHandle)); // ------------------------- // Set up inputs and outputs // ------------------------- void *x; void *hx = NULL; void *cx = NULL; void *dx; void *dhx = NULL; void *dcx = NULL; void *y; void *hy = NULL; void *cy = NULL; void *dy; void *dhy = 
NULL; void *dcy = NULL; // Memory allocation. hx, cx, dhx, dcx, hy, cy, dhy and dcy can be NULL. cudaErrCheck(hipMalloc((void**)&x, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(hipMalloc((void**)&hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dx, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dhx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dcx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&y, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&hy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&cy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); // Set up tensor descriptors. x/y/dx/dy are arrays, one per time step. cudnnTensorDescriptor_t *xDesc, *yDesc, *dxDesc, *dyDesc; cudnnTensorDescriptor_t hxDesc, cxDesc; cudnnTensorDescriptor_t hyDesc, cyDesc; cudnnTensorDescriptor_t dhxDesc, dcxDesc; cudnnTensorDescriptor_t dhyDesc, dcyDesc; xDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); yDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dxDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dyDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); int dimA[3]; int strideA[3]; // In this example dimA[1] is constant across the whole sequence // This isn't required, all that is required is that it does not increase. for (int i = 0; i < seqLength; i++) { cudnnErrCheck(cudnnCreateTensorDescriptor(&xDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&yDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dxDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dyDesc[i])); dimA[0] = miniBatch; dimA[1] = inputSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(xDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dxDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); dimA[0] = miniBatch; dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(yDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dyDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); } dimA[0] = numLayers * (bidirectional ? 
2 : 1); dimA[1] = miniBatch; dimA[2] = hiddenSize; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnCreateTensorDescriptor(&hxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&hyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcyDesc)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); // ------------------------- // Set up the dropout descriptor (needed for the RNN descriptor) // ------------------------- unsigned long long seed = 1337ull; // Pick a seed. cudnnDropoutDescriptor_t dropoutDesc; cudnnErrCheck(cudnnCreateDropoutDescriptor(&dropoutDesc)); // How much memory does dropout need for states? // These states are used to generate random numbers internally // and should not be freed until the RNN descriptor is no longer used size_t stateSize; void *states; cudnnErrCheck(cudnnDropoutGetStatesSize(cudnnHandle, &stateSize)); cudaErrCheck(hipMalloc(&states, stateSize)); cudnnErrCheck(cudnnSetDropoutDescriptor(dropoutDesc, cudnnHandle, dropout, states, stateSize, seed)); printf("dropout = %g, stateSize = %ld\n", dropout, stateSize); // ------------------------- // Set up the RNN descriptor // ------------------------- cudnnRNNDescriptor_t rnnDesc; miopenRNNMode_t RNNMode; cudnnErrCheck(cudnnCreateRNNDescriptor(&rnnDesc)); if (mode == 0) RNNMode = miopenRNNRELU; else if (mode == 1) RNNMode = miopenRNNTANH; else if (mode == 2) RNNMode = miopenLSTM; else if (mode == 3) RNNMode = miopenGRU; cudnnErrCheck(cudnnSetRNNDescriptor(rnnDesc, hiddenSize, numLayers, dropoutDesc, CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation bidirectional ? 
CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, RNNMode, CUDNN_DATA_FLOAT)); // ------------------------- // Set up parameters // ------------------------- // This needs to be done after the rnn descriptor is set as otherwise // we don't know how many parameters we have to allocate void *w; void *dw; cudnnFilterDescriptor_t wDesc, dwDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&wDesc)); cudnnErrCheck(cudnnCreateFilterDescriptor(&dwDesc)); size_t weightsSize; cudnnErrCheck(cudnnGetRNNParamsSize(cudnnHandle, rnnDesc, xDesc[0], &weightsSize, CUDNN_DATA_FLOAT)); int dimW[3]; dimW[0] = weightsSize / sizeof(float); dimW[1] = 1; dimW[2] = 1; cudnnErrCheck(cudnnSetFilterNdDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudnnErrCheck(cudnnSetFilterNdDescriptor(dwDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudaErrCheck(hipMalloc((void**)&w, weightsSize)); cudaErrCheck(hipMalloc((void**)&dw, weightsSize)); // ------------------------- // Set up work space and reserved memory // ------------------------- void *workspace; void *reserveSpace; size_t workSize; size_t reserveSize; // Need for every pass cudnnErrCheck(cudnnGetRNNWorkspaceSize(cudnnHandle, rnnDesc, seqLength, xDesc, &workSize)); // Only needed in training, shouldn't be touched between passes. cudnnErrCheck(cudnnGetRNNTrainingReserveSize(cudnnHandle, rnnDesc, seqLength, xDesc, &reserveSize)); cudaErrCheck(hipMalloc((void**)&workspace, workSize)); cudaErrCheck(hipMalloc((void**)&reserveSpace, reserveSize)); // ********************************************************************************************************* // Initialise weights and inputs // ********************************************************************************************************* // We initialise to something simple. // Matrices are initialised to 1 / matrixSize, biases to 1, data is 1. initGPUData((float*)x, seqLength * inputSize * miniBatch, 1.f); if (hx != NULL) initGPUData((float*)hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (cx != NULL) initGPUData((float*)cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); initGPUData((float*)dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dhy != NULL) initGPUData((float*)dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dcy != NULL) initGPUData((float*)dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); // Weights int numLinearLayers = 0; if (RNNMode == miopenRNNRELU || RNNMode == miopenRNNTANH) { numLinearLayers = 2; } else if (RNNMode == miopenLSTM) { numLinearLayers = 8; } else if (RNNMode == miopenGRU) { numLinearLayers = 6; } for (int layer = 0; layer < numLayers * (bidirectional ? 
2 : 1); layer++) { for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) { cudnnFilterDescriptor_t linLayerMatDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerMatDesc)); float *linLayerMat; cudnnErrCheck(cudnnGetRNNLinLayerMatrixParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerMatDesc, (void**)&linLayerMat)); cudnnDataType_t dataType; cudnnTensorFormat_t format; int nbDims; int filterDimA[3]; cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerMatDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerMat, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f / (float)(filterDimA[0] * filterDimA[1] * filterDimA[2])); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerMatDesc)); cudnnFilterDescriptor_t linLayerBiasDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerBiasDesc)); float *linLayerBias; cudnnErrCheck(cudnnGetRNNLinLayerBiasParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerBiasDesc, (void**)&linLayerBias)); cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerBiasDesc)); } } // ********************************************************************************************************* // At this point all of the setup is done. We now need to pass through the RNN. // ********************************************************************************************************* cudaErrCheck(hipDeviceSynchronize()); hipEvent_t start, stop; float timeForward, timeBackward1, timeBackward2; cudaErrCheck(hipEventCreate(&start)); cudaErrCheck(hipEventCreate(&stop)); cudaErrCheck(hipEventRecord(start)); // If we're not training we use this instead // cudnnErrCheck(cudnnRNNForwardInference(cudnnHandle, // rnnDesc, // xDesc, // x, // hxDesc, // hx, // cxDesc, // cx, // wDesc, // w, // yDesc, // y, // hyDesc, // hy, // cyDesc, // cy, // workspace, // workSize)); cudnnErrCheck(cudnnRNNForwardTraining(cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, cxDesc, cx, wDesc, w, yDesc, y, hyDesc, hy, cyDesc, cy, workspace, workSize, reserveSpace, reserveSize)); float *buffer = (float*)malloc(hiddenSize * seqLength * miniBatch * 1 * sizeof(float)); // heap-allocated and sized to the copy below hipMemcpy(buffer, y, hiddenSize * seqLength * miniBatch * 1 * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < hiddenSize * seqLength * miniBatch * 1; ++i) { printf("%d : %g\n", i, buffer[i]); } free(buffer); cudaErrCheck(hipEventRecord(stop)); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventElapsedTime(&timeForward, start, stop)); cudaErrCheck(hipEventRecord(start)); cudnnErrCheck(cudnnRNNBackwardData(cudnnHandle, rnnDesc, seqLength, yDesc, y, dyDesc, dy, dhyDesc, dhy, dcyDesc, dcy, wDesc, w, hxDesc, hx, cxDesc, cx, dxDesc, dx, dhxDesc, dhx, dcxDesc, dcx, workspace, workSize, reserveSpace, reserveSize )); cudaErrCheck(hipEventRecord(stop)); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventElapsedTime(&timeBackward1, start, stop)); cudaErrCheck(hipEventRecord(start)); // cudnnRNNBackwardWeights adds to the data in dw. 
cudaErrCheck(hipMemset(dw, 0, weightsSize)); cudnnErrCheck(cudnnRNNBackwardWeights( cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, yDesc, y, workspace, workSize, dwDesc, dw, reserveSpace, reserveSize )); cudaErrCheck(hipEventRecord(stop)); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventElapsedTime(&timeBackward2, start, stop)); int numMats = 0; if (RNNMode == miopenRNNRELU || RNNMode == miopenRNNTANH) { numMats = 2; } else if (RNNMode == miopenLSTM) { numMats = 8; } else if (RNNMode == miopenGRU) { numMats = 6; } // Calculate FLOPS printf("Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); printf("Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2))); printf("(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1)); printf("(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2)); // Calculate FLOPS fprintf(fp,"Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); fprintf(fp,"Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2))); fprintf(fp,"(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1)); fprintf(fp,"(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2)); // Make double-sure everything is finished before we copy for result checking. hipDeviceSynchronize(); // ********************************************************************************************************* // Print checksums. // ********************************************************************************************************* if (true) { float* testOutputi; float* testOutputh; float* testOutputc; int biDirScale = (bidirectional ? 
2 : 1); testOutputi = (float*)malloc(hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float)); testOutputh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); testOutputc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); cudaErrCheck(hipMemcpy(testOutputi, y, hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost)); if (hy != NULL) cudaErrCheck(hipMemcpy(testOutputh, hy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost)); if (cy != NULL && RNNMode == miopenLSTM) cudaErrCheck(hipMemcpy(testOutputc, cy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost)); double checksumi = 0.f; double checksumh = 0.f; double checksumc = 0.f; for (int m = 0; m < miniBatch; m++) { double localSumi = 0; double localSumh = 0; double localSumc = 0; for (int j = 0; j < seqLength; j++) { for (int i = 0; i < hiddenSize * biDirScale; i++) { localSumi += testOutputi[j * miniBatch * hiddenSize * biDirScale + m * hiddenSize * biDirScale + i]; } } for (int j = 0; j < numLayers * biDirScale; j++) { for (int i = 0; i < hiddenSize; i++) { if (hy != NULL) localSumh += testOutputh[j * hiddenSize * miniBatch + m * hiddenSize + i]; if (cy != NULL) if (RNNMode == miopenLSTM) localSumc += testOutputc[j * hiddenSize * miniBatch + m * hiddenSize + i]; } } checksumi += localSumi; checksumh += localSumh; checksumc += localSumc; } printf("i checksum %E ", checksumi); fprintf(fp,"i checksum %E ", checksumi); if (RNNMode == miopenLSTM) { printf("c checksum %E ", checksumc); fprintf(fp,"c checksum %E ", checksumc); } printf("h checksum %E\n", checksumh); fprintf(fp,"h checksum %E\n", checksumh); free(testOutputi); free(testOutputc); free(testOutputh); } if (true) { float* testOutputdi; float* testOutputdh; float* testOutputdc; int biDirScale = (bidirectional ? 
2 : 1); testOutputdi = (float*)malloc(inputSize * seqLength * miniBatch * sizeof(float)); testOutputdh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); testOutputdc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); cudaErrCheck(hipMemcpy(testOutputdi, dx, seqLength * miniBatch * inputSize * sizeof(float), hipMemcpyDeviceToHost)); if (dhx != NULL) cudaErrCheck(hipMemcpy(testOutputdh, dhx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost)); if (dcx != NULL) if (RNNMode == miopenLSTM) cudaErrCheck(hipMemcpy(testOutputdc, dcx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost)); float checksumdi = 0.f; float checksumdh = 0.f; float checksumdc = 0.f; for (int m = 0; m < miniBatch; m++) { double localSumdi = 0; double localSumdh = 0; double localSumdc = 0; for (int j = 0; j < seqLength; j++) { for (int i = 0; i < inputSize; i++) { localSumdi += testOutputdi[j * miniBatch * inputSize + m * inputSize + i]; } } for (int j = 0; j < numLayers * biDirScale; j++) { for (int i = 0; i < hiddenSize; i++) { localSumdh += testOutputdh[j * hiddenSize * miniBatch + m * hiddenSize + i]; if (RNNMode == miopenLSTM) localSumdc += testOutputdc[j * hiddenSize * miniBatch + m * hiddenSize + i]; } } checksumdi += localSumdi; checksumdh += localSumdh; checksumdc += localSumdc; } printf("di checksum %E ", checksumdi); fprintf(fp,"di checksum %E ", checksumdi); if (RNNMode == miopenLSTM) { printf("dc checksum %E ", checksumdc); fprintf(fp,"dc checksum %E ", checksumdc); } printf("dh checksum %E\n", checksumdh); fprintf(fp,"dh checksum %E\n", checksumdh); free(testOutputdi); free(testOutputdh); free(testOutputdc); } if (true) { float* testOutputdw; testOutputdw = (float*)malloc(weightsSize); cudaErrCheck(hipMemcpy(testOutputdw, dw, weightsSize, hipMemcpyDeviceToHost)); double checksumdw = 0.; for (int i = 0; i < weightsSize / sizeof(float); i++) { checksumdw += testOutputdw[i]; } printf("dw checksum %E\n", checksumdw); fprintf(fp,"dw checksum %E\n", checksumdw); free(testOutputdw); } hipFree(x); hipFree(hx); hipFree(cx); hipFree(y); hipFree(hy); hipFree(cy); hipFree(dx); hipFree(dhx); hipFree(dcx); hipFree(dy); hipFree(dhy); hipFree(dcy); hipFree(workspace); hipFree(reserveSpace); hipFree(w); hipFree(dw); cudnnDestroy(cudnnHandle); fclose(fp); return 0; }
46fa62a5c0624be6c6a58c4d68b80d4c6e3f0a78.cu
/** * Copyright 2016 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cudnn.h> #include <cuda.h> #include <stdio.h> // Reference outputs (calculated on an M40 GPU) // > ./RNN 20 2 512 64 0 // Forward: 1299 GFLOPs // Backward: 2171 GFLOPs, (1564 GFLOPs), (3549 GFLOPs) // i checksum 1.315793E+06 h checksum 1.315212E+05 // di checksum 6.676003E+01 dh checksum 6.425067E+01 // dw checksum 1.453750E+09 // // > ./RNN 20 2 512 64 1 // Forward: 1296 GFLOPs // Backward: 2235 GFLOPs, (1567 GFLOPs), (3896 GFLOPs) // i checksum 6.319591E+05 h checksum 6.319605E+04 // di checksum 4.501830E+00 dh checksum 4.489546E+00 // dw checksum 5.012598E+07 // // > ./RNN 20 2 512 64 2 // Forward: 2635 GFLOPs // Backward: 2757 GFLOPs, (2001 GFLOPs), (4433 GFLOPs) // i checksum 5.749536E+05 c checksum 4.365091E+05 h checksum 5.774818E+04 // di checksum 3.842206E+02 dc checksum 9.323785E+03 dh checksum 1.182566E+01 // dw checksum 4.313461E+08 // // > ./RNN 20 2 512 64 3 // Forward: 2428 GFLOPs // Backward: 2645 GFLOPs, (1915 GFLOPs), (4270 GFLOPs) // i checksum 6.358978E+05 h checksum 6.281680E+04 // di checksum 6.296622E+00 dh checksum 2.289960E+05 // dw checksum 5.397419E+07 // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cudnnErrCheck(stat) { cudnnErrCheck_((stat), __FILE__, __LINE__); } void cudnnErrCheck_(cudnnStatus_t stat, const char *file, int line) { if (stat != CUDNN_STATUS_SUCCESS) { fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line); } } __global__ void initGPUData_ker(float *data, int numElements, float value) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numElements) { data[tid] = value; } } void initGPUData(float *data, int numElements, float value) { dim3 gridDim; dim3 blockDim; blockDim.x = 1024; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; initGPUData_ker <<< gridDim, blockDim >>> (data, numElements, value); } int main(int argc, char* argv[]) { int seqLength; int numLayers; int hiddenSize; int inputSize; int miniBatch; float dropout; bool bidirectional; int mode; FILE *fp; fp = fopen("result.txt", "w"); if (argc == 7) { seqLength = atoi(argv[1]); numLayers = atoi(argv[2]); hiddenSize = atoi(argv[3]); inputSize = hiddenSize; miniBatch = atoi(argv[4]); bidirectional = 0; mode = atoi(argv[5]); dropout = atof(argv[6]); } else { printf("Usage:\n"); printf("./RNN <seqLength> <numLayers> <hiddenSize> <miniBatch> <mode> <dropout>\n"); printf("Modes: 0 = RNN_RELU, 1 = RNN_TANH, 2 = LSTM, 3 = GRU\n"); return 1; } // ------------------------- // Create cudnn context // ------------------------- cudnnHandle_t cudnnHandle; cudnnErrCheck(cudnnCreate(&cudnnHandle)); // ------------------------- // Set up inputs and outputs // ------------------------- void *x; void *hx = NULL; void *cx = NULL; void *dx; void *dhx = NULL; void *dcx = NULL; void *y; void *hy = NULL; void *cy = NULL; void *dy; void *dhy = NULL; void *dcy = NULL; // Memory allocation. hx, cx, dhx, dcx, hy, cy, dhy and dcy can be NULL. 
cudaErrCheck(cudaMalloc((void**)&x, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dx, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dhx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dcx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&y, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&hy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&cy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); // Set up tensor descriptors. x/y/dx/dy are arrays, one per time step. cudnnTensorDescriptor_t *xDesc, *yDesc, *dxDesc, *dyDesc; cudnnTensorDescriptor_t hxDesc, cxDesc; cudnnTensorDescriptor_t hyDesc, cyDesc; cudnnTensorDescriptor_t dhxDesc, dcxDesc; cudnnTensorDescriptor_t dhyDesc, dcyDesc; xDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); yDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dxDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dyDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); int dimA[3]; int strideA[3]; // In this example dimA[1] is constant across the whole sequence // This isn't required, all that is required is that it does not increase. for (int i = 0; i < seqLength; i++) { cudnnErrCheck(cudnnCreateTensorDescriptor(&xDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&yDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dxDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dyDesc[i])); dimA[0] = miniBatch; dimA[1] = inputSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(xDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dxDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); dimA[0] = miniBatch; dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(yDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dyDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); } dimA[0] = numLayers * (bidirectional ? 
2 : 1); dimA[1] = miniBatch; dimA[2] = hiddenSize; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnCreateTensorDescriptor(&hxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&hyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcyDesc)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); // ------------------------- // Set up the dropout descriptor (needed for the RNN descriptor) // ------------------------- unsigned long long seed = 1337ull; // Pick a seed. cudnnDropoutDescriptor_t dropoutDesc; cudnnErrCheck(cudnnCreateDropoutDescriptor(&dropoutDesc)); // How much memory does dropout need for states? // These states are used to generate random numbers internally // and should not be freed until the RNN descriptor is no longer used size_t stateSize; void *states; cudnnErrCheck(cudnnDropoutGetStatesSize(cudnnHandle, &stateSize)); cudaErrCheck(cudaMalloc(&states, stateSize)); cudnnErrCheck(cudnnSetDropoutDescriptor(dropoutDesc, cudnnHandle, dropout, states, stateSize, seed)); printf("dropout = %g, stateSize = %ld\n", dropout, stateSize); // ------------------------- // Set up the RNN descriptor // ------------------------- cudnnRNNDescriptor_t rnnDesc; cudnnRNNMode_t RNNMode; cudnnErrCheck(cudnnCreateRNNDescriptor(&rnnDesc)); if (mode == 0) RNNMode = CUDNN_RNN_RELU; else if (mode == 1) RNNMode = CUDNN_RNN_TANH; else if (mode == 2) RNNMode = CUDNN_LSTM; else if (mode == 3) RNNMode = CUDNN_GRU; cudnnErrCheck(cudnnSetRNNDescriptor(rnnDesc, hiddenSize, numLayers, dropoutDesc, CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation bidirectional ? 
CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, RNNMode, CUDNN_DATA_FLOAT)); // ------------------------- // Set up parameters // ------------------------- // This needs to be done after the rnn descriptor is set as otherwise // we don't know how many parameters we have to allocate void *w; void *dw; cudnnFilterDescriptor_t wDesc, dwDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&wDesc)); cudnnErrCheck(cudnnCreateFilterDescriptor(&dwDesc)); size_t weightsSize; cudnnErrCheck(cudnnGetRNNParamsSize(cudnnHandle, rnnDesc, xDesc[0], &weightsSize, CUDNN_DATA_FLOAT)); int dimW[3]; dimW[0] = weightsSize / sizeof(float); dimW[1] = 1; dimW[2] = 1; cudnnErrCheck(cudnnSetFilterNdDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudnnErrCheck(cudnnSetFilterNdDescriptor(dwDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudaErrCheck(cudaMalloc((void**)&w, weightsSize)); cudaErrCheck(cudaMalloc((void**)&dw, weightsSize)); // ------------------------- // Set up work space and reserved memory // ------------------------- void *workspace; void *reserveSpace; size_t workSize; size_t reserveSize; // Need for every pass cudnnErrCheck(cudnnGetRNNWorkspaceSize(cudnnHandle, rnnDesc, seqLength, xDesc, &workSize)); // Only needed in training, shouldn't be touched between passes. cudnnErrCheck(cudnnGetRNNTrainingReserveSize(cudnnHandle, rnnDesc, seqLength, xDesc, &reserveSize)); cudaErrCheck(cudaMalloc((void**)&workspace, workSize)); cudaErrCheck(cudaMalloc((void**)&reserveSpace, reserveSize)); // ********************************************************************************************************* // Initialise weights and inputs // ********************************************************************************************************* // We initialise to something simple. // Matrices are initialised to 1 / matrixSize, biases to 1, data is 1. initGPUData((float*)x, seqLength * inputSize * miniBatch, 1.f); if (hx != NULL) initGPUData((float*)hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (cx != NULL) initGPUData((float*)cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); initGPUData((float*)dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dhy != NULL) initGPUData((float*)dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dcy != NULL) initGPUData((float*)dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); // Weights int numLinearLayers = 0; if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) { numLinearLayers = 2; } else if (RNNMode == CUDNN_LSTM) { numLinearLayers = 8; } else if (RNNMode == CUDNN_GRU) { numLinearLayers = 6; } for (int layer = 0; layer < numLayers * (bidirectional ? 
2 : 1); layer++) { for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) { cudnnFilterDescriptor_t linLayerMatDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerMatDesc)); float *linLayerMat; cudnnErrCheck(cudnnGetRNNLinLayerMatrixParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerMatDesc, (void**)&linLayerMat)); cudnnDataType_t dataType; cudnnTensorFormat_t format; int nbDims; int filterDimA[3]; cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerMatDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerMat, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f / (float)(filterDimA[0] * filterDimA[1] * filterDimA[2])); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerMatDesc)); cudnnFilterDescriptor_t linLayerBiasDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerBiasDesc)); float *linLayerBias; cudnnErrCheck(cudnnGetRNNLinLayerBiasParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerBiasDesc, (void**)&linLayerBias)); cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerBiasDesc)); } } // ********************************************************************************************************* // At this point all of the setup is done. We now need to pass through the RNN. // ********************************************************************************************************* cudaErrCheck(cudaDeviceSynchronize()); cudaEvent_t start, stop; float timeForward, timeBackward1, timeBackward2; cudaErrCheck(cudaEventCreate(&start)); cudaErrCheck(cudaEventCreate(&stop)); cudaErrCheck(cudaEventRecord(start)); // If we're not training we use this instead // cudnnErrCheck(cudnnRNNForwardInference(cudnnHandle, // rnnDesc, // xDesc, // x, // hxDesc, // hx, // cxDesc, // cx, // wDesc, // w, // yDesc, // y, // hyDesc, // hy, // cyDesc, // cy, // workspace, // workSize)); cudnnErrCheck(cudnnRNNForwardTraining(cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, cxDesc, cx, wDesc, w, yDesc, y, hyDesc, hy, cyDesc, cy, workspace, workSize, reserveSpace, reserveSize)); float *buffer = (float*)malloc(hiddenSize * seqLength * miniBatch * 1 * sizeof(float)); // heap-allocated and sized to the copy below cudaMemcpy(buffer, y, hiddenSize * seqLength * miniBatch * 1 * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < hiddenSize * seqLength * miniBatch * 1; ++i) { printf("%d : %g\n", i, buffer[i]); } free(buffer); cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&timeForward, start, stop)); cudaErrCheck(cudaEventRecord(start)); cudnnErrCheck(cudnnRNNBackwardData(cudnnHandle, rnnDesc, seqLength, yDesc, y, dyDesc, dy, dhyDesc, dhy, dcyDesc, dcy, wDesc, w, hxDesc, hx, cxDesc, cx, dxDesc, dx, dhxDesc, dhx, dcxDesc, dcx, workspace, workSize, reserveSpace, reserveSize )); cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&timeBackward1, start, stop)); cudaErrCheck(cudaEventRecord(start)); // cudnnRNNBackwardWeights adds to the data in dw. 
cudaErrCheck(cudaMemset(dw, 0, weightsSize)); cudnnErrCheck(cudnnRNNBackwardWeights( cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, yDesc, y, workspace, workSize, dwDesc, dw, reserveSpace, reserveSize )); cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&timeBackward2, start, stop)); int numMats = 0; if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) { numMats = 2; } else if (RNNMode == CUDNN_LSTM) { numMats = 8; } else if (RNNMode == CUDNN_GRU) { numMats = 6; } // Calculate FLOPS printf("Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); printf("Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2))); printf("(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1)); printf("(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2)); // Calculate FLOPS fprintf(fp,"Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); fprintf(fp,"Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2))); fprintf(fp,"(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1)); fprintf(fp,"(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2)); // Make double-sure everything is finished before we copy for result checking. cudaDeviceSynchronize(); // ********************************************************************************************************* // Print checksums. // ********************************************************************************************************* if (true) { float* testOutputi; float* testOutputh; float* testOutputc; int biDirScale = (bidirectional ? 
2 : 1); testOutputi = (float*)malloc(hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float)); testOutputh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); testOutputc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); cudaErrCheck(cudaMemcpy(testOutputi, y, hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost)); if (hy != NULL) cudaErrCheck(cudaMemcpy(testOutputh, hy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost)); if (cy != NULL && RNNMode == CUDNN_LSTM) cudaErrCheck(cudaMemcpy(testOutputc, cy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost)); double checksumi = 0.f; double checksumh = 0.f; double checksumc = 0.f; for (int m = 0; m < miniBatch; m++) { double localSumi = 0; double localSumh = 0; double localSumc = 0; for (int j = 0; j < seqLength; j++) { for (int i = 0; i < hiddenSize * biDirScale; i++) { localSumi += testOutputi[j * miniBatch * hiddenSize * biDirScale + m * hiddenSize * biDirScale + i]; } } for (int j = 0; j < numLayers * biDirScale; j++) { for (int i = 0; i < hiddenSize; i++) { if (hy != NULL) localSumh += testOutputh[j * hiddenSize * miniBatch + m * hiddenSize + i]; if (cy != NULL) if (RNNMode == CUDNN_LSTM) localSumc += testOutputc[j * hiddenSize * miniBatch + m * hiddenSize + i]; } } checksumi += localSumi; checksumh += localSumh; checksumc += localSumc; } printf("i checksum %E ", checksumi); fprintf(fp,"i checksum %E ", checksumi); if (RNNMode == CUDNN_LSTM) { printf("c checksum %E ", checksumc); fprintf(fp,"c checksum %E ", checksumc); } printf("h checksum %E\n", checksumh); fprintf(fp,"h checksum %E\n", checksumh); free(testOutputi); free(testOutputc); free(testOutputh); } if (true) { float* testOutputdi; float* testOutputdh; float* testOutputdc; int biDirScale = (bidirectional ? 
2 : 1); testOutputdi = (float*)malloc(inputSize * seqLength * miniBatch * sizeof(float)); testOutputdh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); testOutputdc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float)); cudaErrCheck(cudaMemcpy(testOutputdi, dx, seqLength * miniBatch * inputSize * sizeof(float), cudaMemcpyDeviceToHost)); if (dhx != NULL) cudaErrCheck(cudaMemcpy(testOutputdh, dhx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost)); if (dcx != NULL) if (RNNMode == CUDNN_LSTM) cudaErrCheck(cudaMemcpy(testOutputdc, dcx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost)); float checksumdi = 0.f; float checksumdh = 0.f; float checksumdc = 0.f; for (int m = 0; m < miniBatch; m++) { double localSumdi = 0; double localSumdh = 0; double localSumdc = 0; for (int j = 0; j < seqLength; j++) { for (int i = 0; i < inputSize; i++) { localSumdi += testOutputdi[j * miniBatch * inputSize + m * inputSize + i]; } } for (int j = 0; j < numLayers * biDirScale; j++) { for (int i = 0; i < hiddenSize; i++) { localSumdh += testOutputdh[j * hiddenSize * miniBatch + m * hiddenSize + i]; if (RNNMode == CUDNN_LSTM) localSumdc += testOutputdc[j * hiddenSize * miniBatch + m * hiddenSize + i]; } } checksumdi += localSumdi; checksumdh += localSumdh; checksumdc += localSumdc; } printf("di checksum %E ", checksumdi); fprintf(fp,"di checksum %E ", checksumdi); if (RNNMode == CUDNN_LSTM) { printf("dc checksum %E ", checksumdc); fprintf(fp,"dc checksum %E ", checksumdc); } printf("dh checksum %E\n", checksumdh); fprintf(fp,"dh checksum %E\n", checksumdh); free(testOutputdi); free(testOutputdh); free(testOutputdc); } if (true) { float* testOutputdw; testOutputdw = (float*)malloc(weightsSize); cudaErrCheck(cudaMemcpy(testOutputdw, dw, weightsSize, cudaMemcpyDeviceToHost)); double checksumdw = 0.; for (int i = 0; i < weightsSize / sizeof(float); i++) { checksumdw += testOutputdw[i]; } printf("dw checksum %E\n", checksumdw); fprintf(fp,"dw checksum %E\n", checksumdw); free(testOutputdw); } cudaFree(x); cudaFree(hx); cudaFree(cx); cudaFree(y); cudaFree(hy); cudaFree(cy); cudaFree(dx); cudaFree(dhx); cudaFree(dcx); cudaFree(dy); cudaFree(dhy); cudaFree(dcy); cudaFree(workspace); cudaFree(reserveSpace); cudaFree(w); cudaFree(dw); cudnnDestroy(cudnnHandle); fclose(fp); return 0; }
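One gap both versions of this RNN sample share: the teardown frees the data buffers but never releases the dropout state buffer, the descriptor arrays, or the descriptors themselves. Below is a sketch of the missing cleanup using the sample's own variable names; the destroy calls are standard cuDNN API, shown in the CUDA spelling (the HIP side would use hipFree in place of cudaFree):

cudaFree(states);  // dropout RNG state buffer created for dropoutDesc
for (int i = 0; i < seqLength; i++) {
    // per-time-step tensor descriptors
    cudnnDestroyTensorDescriptor(xDesc[i]);
    cudnnDestroyTensorDescriptor(yDesc[i]);
    cudnnDestroyTensorDescriptor(dxDesc[i]);
    cudnnDestroyTensorDescriptor(dyDesc[i]);
}
free(xDesc); free(yDesc); free(dxDesc); free(dyDesc);  // host-side descriptor arrays
cudnnDestroyFilterDescriptor(wDesc);
cudnnDestroyFilterDescriptor(dwDesc);
cudnnDestroyRNNDescriptor(rnnDesc);
cudnnDestroyDropoutDescriptor(dropoutDesc);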
e9b8907638e8e78f6b800d94de03181d71859283.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <ctime> #include <iostream> #include <random> #include <stdio.h> #define _SIZE_ 1000000 /* hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } */ __global__ void addLoopGPU(int* a, int* b, int* c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index, one thread per element if (tid < _SIZE_) c[tid] = (int)fabsf(powf(a[tid], 2.0f) - powf(b[tid], 2.0f)); // |a^2 - b^2| for each element } int main() { /* const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; */ int* a = new int[_SIZE_]; int* b = new int[_SIZE_]; int* c = new int[_SIZE_]; int* result = new int[_SIZE_]; int* dev_result, * dev_a, * dev_b = 0; hipMalloc((void**)& dev_a, _SIZE_ * sizeof(int)); hipMalloc((void**)& dev_b, _SIZE_ * sizeof(int)); hipMalloc((void**)& dev_result, _SIZE_ * sizeof(int)); for (int i = 0; i < _SIZE_; i++) { a[i] = rand() % 256; b[i] = rand() % 256; } /* std::cout << "CPU" << std::endl; clock_t startTimeCPU = clock(); for (int i = 0; i < _SIZE_; i++) { c[i] = abs(powf(a[i], 2) - powf(b[i], 2)); } clock_t stopTimeCPU = clock(); std::cout << "Fini(GPU) en : " << stopTimeCPU - startTimeCPU << " ms" << std::endl; std::cout << "Fin CPU" << std::endl << std::endl; */ std::cout << "GPU" << std::endl; clock_t startTime = clock(); hipMemcpy(dev_a, a, _SIZE_ * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, _SIZE_ * sizeof(int), hipMemcpyHostToDevice); addLoopGPU << <64, 8 >> > (dev_a, dev_b, dev_result); hipMemcpy(&result, dev_result, sizeof(int), hipMemcpyDeviceToHost); clock_t stopTime = clock(); std::cout << "Fini(GPU) en : " << stopTime - startTime << " ms" << std::endl; std::cout << "Fin CPU" << std::endl << std::endl; hipFree(dev_a); hipFree(dev_b); hipFree(dev_result); delete(a); delete(b); delete(c); return 0; }
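The active path above does not check the return codes of hipMalloc/hipMemcpy. A minimal sketch of the usual check-and-abort wrapper for HIP calls (the HIP_CHECK name is an illustrative assumption, not part of this file):

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Aborts with file/line context when a HIP runtime call fails.
#define HIP_CHECK(call)                                              \
    do {                                                             \
        hipError_t err_ = (call);                                    \
        if (err_ != hipSuccess) {                                    \
            fprintf(stderr, "HIP error %s at %s:%d\n",               \
                    hipGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)

// Usage: HIP_CHECK(hipMalloc((void**)&dev_a, _SIZE_ * sizeof(int)));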
e9b8907638e8e78f6b800d94de03181d71859283.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <ctime> #include <iostream> #include <random> #include <stdio.h> #define _SIZE_ 1000000 /* cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } */ __global__ void addLoopGPU(int* a, int* b, int* c) { int tid = blockIdx.x; if (tid < 64) c[tid] = abs(powf(b[tid], 2) - powf(b[tid], 2)); } int main() { /* const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; */ int* a = new int[_SIZE_]; int* b = new int[_SIZE_]; int* c = new int[_SIZE_]; int* result = new int[_SIZE_]; int* dev_result, * dev_a, * dev_b = 0; cudaMalloc((void**)& dev_a, _SIZE_ * sizeof(int)); cudaMalloc((void**)& dev_b, _SIZE_ * sizeof(int)); cudaMalloc((void**)& dev_result, _SIZE_ * sizeof(int)); for (int i = 0; i < _SIZE_; i++) { a[i] = rand() % 256; b[i] = rand() % 256; } /* std::cout << "CPU" << std::endl; clock_t startTimeCPU = clock(); for (int i = 0; i < _SIZE_; i++) { c[i] = abs(powf(a[i], 2) - powf(b[i], 2)); } clock_t stopTimeCPU = clock(); std::cout << "Fini(GPU) en : " << stopTimeCPU - startTimeCPU << " ms" << std::endl; std::cout << "Fin CPU" << std::endl << std::endl; */ std::cout << "GPU" << std::endl; clock_t startTime = clock(); cudaMemcpy(dev_a, a, _SIZE_ * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, _SIZE_ * sizeof(int), cudaMemcpyHostToDevice); addLoopGPU << <64, 8 >> > (dev_a, dev_b, dev_result); cudaMemcpy(&result, dev_result, sizeof(int), cudaMemcpyDeviceToHost); clock_t stopTime = clock(); std::cout << "Fini(GPU) en : " << stopTime - startTime << " ms" << std::endl; std::cout << "Fin CPU" << std::endl << std::endl; cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_result); delete(a); delete(b); delete(c); return 0; }
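An alternative to rounding the block count up to the data size is a grid-stride loop, which stays correct under any launch configuration; a minimal sketch with the same element-wise computation (the kernel name is illustrative, not part of this file):

__global__ void addLoopGridStride(const int* a, const int* b, int* c, int n)
{
    // Each thread walks the array in steps of the total thread count, so the
    // kernel covers all n elements regardless of grid and block dimensions.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        c[i] = (int)fabsf(powf((float)a[i], 2.0f) - powf((float)b[i], 2.0f));
}
// e.g. addLoopGridStride<<<64, 256>>>(dev_a, dev_b, dev_result, _SIZE_);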
90632bcbefa771a2153d5af66708242816aeb4f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (C) 2013 David G. Andersen. All rights reserved. * * Use of this code is covered under the Apache 2.0 license, which * can be found in the file "LICENSE" */ #include <sys/time.h> #include "hasher.h" #include "scrypt_cores.cu" /* write_keys writes the 8 keys being processed by a warp to the global * scratchpad. To effectively use memory bandwidth, it performs the writes * (and reads, for read_keys) 128 bytes at a time per memory location * by __shfl'ing the 4 entries in bx to the threads in the next-up * thread group. It then has eight threads together perform uint4 * (128 bit) writes to the destination region. This seems to make * quite effective use of memory bandwidth. An approach that spread * uint32s across more threads was slower because of the increased * computation it required. * * "start" is the loop iteration producing the write - the offset within * the block's memory. * * Internally, this algorithm first __shfl's the 4 bx entries to * the next up thread group, and then uses a conditional move to * ensure that odd-numbered thread groups exchange the b/bx ordering * so that the right parts are written together. * * Thanks to Babu for helping design the 128-bit-per-write version. * * _direct lets the caller specify the absolute start location instead of * the relative start location, as an attempt to reduce some recomputation. */ __device__ inline void write_keys_direct(const uint32_t b[4], const uint32_t bx[4], uint32_t *scratch, uint32_t start) { uint4 t, t2; t.x = b[0]; t.y = b[1]; t.z = b[2]; t.w = b[3]; int target_thread = (threadIdx.x + 4)%32; t2.x = __shfl((int)bx[0], target_thread); t2.y = __shfl((int)bx[1], target_thread); t2.z = __shfl((int)bx[2], target_thread); t2.w = __shfl((int)bx[3], target_thread); int t2_start = __shfl((int)start, target_thread) + 4; bool c = (threadIdx.x & 0x4); int loc = c ? t2_start : start; *((uint4 *)(&scratch[loc])) = (c ? t2 : t); loc = c ? start : t2_start; *((uint4 *)(&scratch[loc])) = (c ? t : t2); } __device__ inline void write_keys(const uint32_t b[4], const uint32_t bx[4], uint32_t *scratch, uint32_t start) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + (32*start) + 8*(threadIdx.x%4); write_keys_direct(b, bx, scratch, start); } inline __device__ void read_xor_keys_direct(uint32_t b[4], uint32_t bx[4], const __restrict__ uint32_t *scratch, uint32_t start) { uint4 t, t2; // Tricky bit: We do the work on behalf of thread+4, but then when // we steal, we have to steal from (thread+28)%32 to get the right // stuff back. start = __shfl((int)start, (threadIdx.x & 0x7c)) + 8*(threadIdx.x%4); int target_thread = (threadIdx.x + 4)%32; int t2_start = __shfl((int)start, target_thread) + 4; bool c = (threadIdx.x & 0x4); int loc = c ? t2_start : start; t = *((uint4 *)(&scratch[loc])); loc = c ? start : t2_start; t2 = *((uint4 *)(&scratch[loc])); uint4 tmp = t; t = (c ? t2 : t); t2 = (c ? 
tmp : t2); b[0] ^= t.x; b[1] ^= t.y; b[2] ^= t.z; b[3] ^= t.w; int steal_target = (threadIdx.x + 28)%32; bx[0] ^= __shfl((int)t2.x, steal_target); bx[1] ^= __shfl((int)t2.y, steal_target); bx[2] ^= __shfl((int)t2.z, steal_target); bx[3] ^= __shfl((int)t2.w, steal_target); } inline __device__ void read_xor_keys(uint32_t b[4], uint32_t bx[4], const __restrict__ uint32_t *scratch, uint32_t start) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + (32*start); read_xor_keys_direct(b, bx, scratch, start); } inline __device__ void primary_order_shuffle(uint32_t b[4], uint32_t bx[4]) { /* Inner loop shuffle targets */ int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3); int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3); int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3); b[3] = __shfl((int)b[3], x1_target_lane); b[2] = __shfl((int)b[2], x2_target_lane); b[1] = __shfl((int)b[1], x3_target_lane); uint32_t tmp = b[1]; b[1] = b[3]; b[3] = tmp; bx[3] = __shfl((int)bx[3], x1_target_lane); bx[2] = __shfl((int)bx[2], x2_target_lane); bx[1] = __shfl((int)bx[1], x3_target_lane); tmp = bx[1]; bx[1] = bx[3]; bx[3] = tmp; } /* * load_key loads a 32*32bit key from a contiguous region of memory in B. * The input keys are in external order (i.e., 0, 1, 2, 3, ...). * After loading, each thread has its four b and four bx keys stored * in internal processing order. */ inline __device__ void load_key(const uint32_t *B, uint32_t b[4], uint32_t bx[4]) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; // Read in permuted order. Key loads are not our bottleneck right now. for (int i = 0; i < 4; i++) { b[i] = B[key_offset + 4*thread_in_block + (thread_in_block+i)%4]; bx[i] = B[key_offset + 4*thread_in_block + (thread_in_block+i)%4 + 16]; } primary_order_shuffle(b, bx); } /* * store_key performs the opposite transform as load_key, taking * internally-ordered b and bx and storing them into a contiguous * region of B in external order. */ inline __device__ void store_key(uint32_t *B, uint32_t b[4], uint32_t bx[4]) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; primary_order_shuffle(b, bx); for (int i = 0; i < 4; i++) { B[key_offset + 4*thread_in_block + (thread_in_block+i)%4] = b[i]; B[key_offset + 4*thread_in_block + (thread_in_block+i)%4 + 16] = bx[i]; } } /* * salsa_xor_core does the equivalent of the xor_salsa8 loop from * tarsnap's implementation of scrypt. The original scrypt called: * xor_salsa8(&X[0], &X[16]); <-- the "b" loop * xor_salsa8(&X[16], &X[0]); <-- the "bx" loop * This version is unrolled to handle both of these loops in a single * call to avoid unnecessary data movement. 
*/ inline __device__ void salsa_xor_core(uint32_t b[4], uint32_t bx[4], uint32_t x[4], const int x1_target_lane, const int x2_target_lane, const int x3_target_lane) { uint32_t tmp; #pragma unroll for (int i = 0; i < 4; i++) { b[i] ^= bx[i]; x[i] = b[i]; } #define XOR_ROTATE_ADD(dst, s1, s2, amt) do { tmp = x[s1]+x[s2]; x[dst] ^= ((tmp<<amt)|(tmp>>(32-amt))); } while(0) // Enter in "column" mode (t0 has 0, 4, 8, 12) for (int j = 0; j < 4; j++) { // Mixing phase of salsa XOR_ROTATE_ADD(1, 0, 3, 7); XOR_ROTATE_ADD(2, 1, 0, 9); XOR_ROTATE_ADD(3, 2, 1, 13); XOR_ROTATE_ADD(0, 3, 2, 18); /* Transpose rows and columns. */ /* Unclear if this optimization is needed: These are ordered based * upon the dependencies needed in the later xors. Compiler should be * able to figure this out, but might as well give it a hand. */ x[1] = __shfl((int)x[1], x3_target_lane); x[3] = __shfl((int)x[3], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); /* The next XOR_ROTATE_ADDS could be written to be a copy-paste of the first, * but the register targets are rewritten here to swap x[1] and x[3] so that * they can be directly shuffled to and from our peer threads without * reassignment. The reverse shuffle then puts them back in the right place. */ XOR_ROTATE_ADD(3, 0, 1, 7); XOR_ROTATE_ADD(2, 3, 0, 9); XOR_ROTATE_ADD(1, 2, 3, 13); XOR_ROTATE_ADD(0, 1, 2, 18); x[3] = __shfl((int)x[3], x3_target_lane); x[1] = __shfl((int)x[1], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); } for (int i = 0; i < 4; i++) { b[i] += x[i]; // The next two lines are the beginning of the BX-centric loop iteration bx[i] ^= b[i]; x[i] = bx[i]; } // This is a copy of the same loop above, identical but stripped of comments. // Duplicated so that we can complete a bx-based loop with fewer register moves. for (int j = 0; j < 4; j++) { XOR_ROTATE_ADD(1, 0, 3, 7); XOR_ROTATE_ADD(2, 1, 0, 9); XOR_ROTATE_ADD(3, 2, 1, 13); XOR_ROTATE_ADD(0, 3, 2, 18); x[1] = __shfl((int)x[1], x3_target_lane); x[3] = __shfl((int)x[3], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); XOR_ROTATE_ADD(3, 0, 1, 7); XOR_ROTATE_ADD(2, 3, 0, 9); XOR_ROTATE_ADD(1, 2, 3, 13); XOR_ROTATE_ADD(0, 1, 2, 18); x[3] = __shfl((int)x[3], x3_target_lane); x[1] = __shfl((int)x[1], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); } // At the end of these iterations, the data is in primary order again. #undef XOR_ROTATE_ADD for (int i = 0; i < 4; i++) { bx[i] += x[i]; } } /* * The hasher_gen_kernel operates on a group of 1024-bit input keys * in B, stored as: * B = { k1B k1Bx k2B k2Bx ... } * and fills up the scratchpad with the iterative hashes derived from * those keys: * scratch { k1h1B k1h1Bx K1h2B K1h2Bx ... K2h1B K2h1Bx K2h2B K2h2Bx ... } * scratch is 1024 times larger than the input keys B. * It is extremely important to stream writes effectively into scratch; * less important to coalesce the reads from B. * * Key ordering note: Keys are input from B in "original" order: * K = {k1, k2, k3, k4, k5, ..., kx15, kx16, kx17, ..., kx31 } * After inputting into kernel_gen, each component k and kx of the * key is transmuted into a permuted internal order to make processing faster: * K = k, kx with: * k = 0, 4, 8, 12, 5, 9, 13, 1, 10, 14, 2, 6, 15, 3, 7, 11 * and similarly for kx. */ __global__ void hasher_gen_kernel(__restrict__ uint32_t *B, __restrict__ uint32_t *scratch) { /* Each thread operates on four of the sixteen B and Bx variables. Thus, * each key is processed by four threads in parallel. 
salsa_scrypt_core
 * internally shuffles the variables between threads (and back) as
 * needed. */

  uint32_t b[4], bx[4], x[4];
  load_key(B, b, bx);

  /* Inner loop shuffle targets */
  int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3);
  int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3);
  int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3);

  int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK;
  int start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + 8*(threadIdx.x%4);

  for (int i = 0; i < 1024; i++) {
    write_keys_direct(b, bx, scratch, start+32*i);
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }

  store_key(B, b, bx);
}

/*
 * hasher_hash_kernel runs the second phase of scrypt after the scratch
 * buffer is filled with the iterative hashes: It bounces through
 * the scratch buffer in pseudorandom order, mixing the key as it goes.
 */
__global__ void hasher_hash_kernel(__restrict__ uint32_t *B, const __restrict__ uint32_t *scratch) {
  /* Each thread operates on a group of four variables that must be processed
   * together. Shuffle between threads in a warp between iterations.
   */
  uint32_t b[4], bx[4], x[4];
  load_key(B, b, bx);

  /* Inner loop shuffle targets */
  int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3);
  int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3);
  int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3);

  for (int i = 0; i < 1024; i++) {
    // Bounce through the key space and XOR the new keys in.
    // Critical thing: (X[16] & 1023) tells us the next slot to read.
    // X[16] in the original is bx[0]
    int slot = bx[0] & 1023;
    read_xor_keys(b, bx, scratch, slot);
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }
  store_key(B, b, bx);
}

/*
 * hasher_combo_kernel runs the functions of both hasher_gen_kernel
 * and hasher_hash_kernel in a single invocation. It is
 * designed to reduce kernel launch downtime a bit and omit one
 * intermediate store_key operation to global memory. This is faster on
 * my GT 550m, but seems a bit slower on a Tesla, probably because one of
 * the two individual kernels can use fewer registers alone.
 */
__global__ void hasher_combo_kernel(__restrict__ uint32_t *B, __restrict__ uint32_t *scratch) {
  uint32_t b[4], bx[4], x[4];
  load_key(B, b, bx);

  int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3);
  int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3);
  int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3);

  int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK;
  int start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + 8*(threadIdx.x%4);

  for (int i = 0; i < 1024; i++) {
    write_keys_direct(b, bx, scratch, start);
    start += 32;
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }

  start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK;

  for (int i = 0; i < 1024; i++) {
    int slot = bx[0] & 1023;
    read_xor_keys_direct(b, bx, scratch, start+32*slot);
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }

  store_key(B, b, bx);
}

/*
 * scrypt_hash_start_kernel takes a job description (job) and a starting nonce number (n_base)
 * and generates (batchsize) starting hashes using HMAC_SHA256 and PBKDF2_SHA256.
 * This is the first step in scrypt computation before the salsa core executions
 * that introduce the memory difficulty.
* * This function stores three outputs: * output - the 1024-bit intermediate state fed to scrypt_core * tstate, ostate - intermediate PBKDF2 state that needs to be used again * after the execution of scrypt_core. */ __global__ void scrypt_hash_start_kernel(const __restrict__ scan_job *job, __restrict__ uint32_t *output, __restrict__ uint32_t *ostate_out, __restrict__ uint32_t *tstate_out, uint32_t n_base) { uint32_t tstate[8]; uint32_t ostate[8]; uint32_t data[20]; int blockid = (blockIdx.x*blockDim.x + threadIdx.x); /* data: the input. * tstate, ostate, output must have sufficient space * -> batchsize * 8 * sizeof(uint32_t) */ /* Trivial implementation: Each thread processes one key. This is lame, but * PBKDF related processing is only about 3.5% of runtime right now. */ const uint32_t *in_data = job->data; uint64_t blockstart = blockid*32; { uint4 d; for (int i = 0; i < 20; i+= 4) { d = *(uint4 *)&in_data[i]; data[i] = d.x; data[i+1] = d.y; data[i+2] = d.z; data[i+3] = d.w; } } data[19] = n_base + blockid; read_8_as_uint4(job->initial_midstate, tstate); dev_HMAC_SHA256_80_init(data, tstate, ostate); /* This writes directly to output and does no shuffling or cleverness * to coalesce writes. Unnecessary memory transactions, but not worth * fixing yet - PBKDF is less than 4% of runtime overall still. */ dev_PBKDF2_SHA256_80_128(tstate, ostate, data, &output[blockstart]); /* Write out (and read back) tstate and ostate interleaved across threads to improve mem b/w. Easy change, though small benefit. */ int bigblockstart = (blockIdx.x*blockDim.x)*8; int threadsInBlock = blockDim.x; for (int i = 0; i < 8; i++) { tstate_out[bigblockstart + threadsInBlock*i + threadIdx.x ] = tstate[i]; ostate_out[bigblockstart + threadsInBlock*i + threadIdx.x ] = ostate[i]; } } /* * scrypt_hash_finish_kernel takes the output state from scrypt_core and * recombines it with the saved PBKDF2 tstate and ostate from * scrypt_hash_start_kernel to produce an output key. * * It then compares the output key ("hash") to the target number * specified in the job to determine whether hash < job->target. * If it is, it puts its block/thread ID + 1 into *dev_output. * If it is not, it does nothing. The caller should ensure that * dev_output is zero before the call to scrypt_hash_finish_kernel, and * should subtract one from non-zero output to determine the * actual thread ID (and thus, the nonce used for hashing). * * This method does not guarantee that the lowest-numbered or * smallest acceptable output is returned, merely that one satisfying * output is returned if any exist. 
*/ __global__ void scrypt_hash_finish_kernel(const __restrict__ uint32_t *dev_keys, __restrict__ uint32_t *dev_tstate, __restrict__ uint32_t *dev_ostate, __restrict__ uint32_t *dev_output, __restrict__ const scan_job *job) { uint32_t tstate[8]; uint32_t ostate[8]; uint32_t hash[32]; int blockid = (blockIdx.x*blockDim.x + threadIdx.x); int bigblockstart = blockIdx.x*blockDim.x*8; int threadsInBlock = blockDim.x; /* As in start_kernel, reads tstate/ostate interleaved between threads */ for (int i = 0; i < 8; i++) { tstate[i] = dev_tstate[bigblockstart + threadsInBlock*i + threadIdx.x ]; ostate[i] = dev_ostate[bigblockstart + threadsInBlock*i + threadIdx.x ]; } uint64_t blockstart = blockid*32; uint4 t; for (int i = 0; i < 32; i+= 4) { t = *(uint4 *)&dev_keys[blockstart+i]; hash[i] = t.x; hash[i+1] = t.y; hash[i+2] = t.z; hash[i+3] = t.w; } dev_PBKDF2_SHA256_128_32(tstate, ostate, hash); uint32_t foundit = 0x00000000; uint32_t maybe = 0xffffffff; uint32_t target[8]; read_8_as_uint4(job->target, target); for (int j = 7; j >= 0; j--) { uint32_t tmp = swab32(ostate[j]); maybe = (maybe & (tmp <= target[j])); foundit = (foundit | (maybe & (tmp < target[j]))); } foundit = foundit ? (blockid+1) : foundit; if (foundit) { // Finding the lowest doesn't matter. Just let the first writer win. uint32_t oldval = atomicCAS(&dev_output[0], 0, foundit); } } /* Unit test kernel to expose loading, writing, reading, and storing keys */ __global__ void test_load_store_kernel(__restrict__ uint32_t *B, __restrict__ uint32_t *scratch) { uint32_t b[4], bx[4]; load_key(B, b, bx); for (int i = 0; i < 4; i++) { b[i]++; bx[i]++; } for (int slot = 0; slot < 1024; slot++) { write_keys(b, bx, scratch, slot); for (int i = 0; i < 4; i++) { b[i] = bx[i] = 0; } read_xor_keys(b, bx, scratch, slot); } store_key(B, b, bx); } /* * The CudaHasher constructor does nothing. * You must call Initialize() and check the error code. */ CudaHasher::CudaHasher() : dev_keys(NULL), dev_scratch(NULL), dev_output(NULL), dev_tstate(NULL), dev_ostate(NULL), scan_output(NULL), dev_job(NULL) { // dev_keys = NULL; // dev_scratch = NULL; } /* * Initialize() sets up the GPU state. * Between creation and destruction, a lot of memory * may be eaten on the GPU. For performance, it is * _not_ freed when ComputeHashes is not running. * ergo: If you're going to be idle for a long time * and want to be nice to other uses of the GPU, destroy * the CudaHasher and create a new one. * * If initialize fails, destroy the object to reset the GPU. */ int CudaHasher::Initialize() { hipError_t error; /* Stop eating CPU while waiting for results! */ error = hipSetDeviceFlags(hipDeviceScheduleBlockingSync); if (error != hipSuccess) { fprintf(stderr, "Could not set blocking sync (error %d)\n", error); } /* Determine device memory to size batch */ size_t free, total; hipMemGetInfo(&free, &total); //printf("Initializing. Device has %ld free of %ld total bytes of memory\n", free, total); int mem_per_job = 160000; /* A little conservative */ int max_batchsize = free / mem_per_job; /* We need 4 threads per work unit and each block should have 192 threads */ /* The number of blocks should also probably be a multiple of the * number of multiprocessors. 
I'm using 8 here as a simple way * to get a pretty-OK answer that's probably not optimal for big GPUs */ batchsize = (max_batchsize/(2*THREADS_PER_CUDA_BLOCK))*2*THREADS_PER_CUDA_BLOCK; n_blocks = (batchsize*THREADS_PER_SCRYPT_BLOCK/THREADS_PER_CUDA_BLOCK); error = hipMalloc((void **) &dev_job, sizeof(scan_job)); if (error != hipSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_job = NULL; return -1; } error = hipMalloc((void **) &dev_keys, sizeof(scrypt_hash) * batchsize); if (error != hipSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_keys = NULL; return -1; } size_t scratchBufSize = sizeof(scrypt_hash)*1024*batchsize; error = hipMalloc((void **) &dev_scratch, scratchBufSize); if (error != hipSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_scratch = NULL; return -1; } /* dev_output holds one int per warp indicating if a thread in that warp solved the block */ error = hipMalloc((void **) &dev_output, sizeof(uint32_t)); if (error != hipSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_output = NULL; return -1; } error = hipMalloc((void **) &dev_tstate, 8*sizeof(uint32_t) * batchsize); if (error != hipSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_tstate = NULL; return -1; } error = hipMalloc((void **) &dev_ostate, 8 * sizeof(uint32_t) * batchsize); if (error != hipSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_ostate = NULL; return -1; } scan_output = (uint32_t *)malloc(sizeof(uint32_t)); if (!scan_output) { perror("Could not allocate scan output buffer (host)"); return -1; } hipFuncSetCacheConfig(hasher_hash_kernel, hipFuncCachePreferL1); hipFuncSetCacheConfig(hasher_gen_kernel, hipFuncCachePreferL1); hipFuncSetCacheConfig(hasher_combo_kernel, hipFuncCachePreferL1); hipFuncSetCacheConfig(scrypt_hash_start_kernel, hipFuncCachePreferL1); hipFuncSetCacheConfig(scrypt_hash_finish_kernel, hipFuncCachePreferL1); return 0; } CudaHasher::~CudaHasher() { /* Free host memory */ if (scan_output) free(scan_output); /* Free device memory */ if (dev_scratch != NULL) hipFree(dev_scratch); if (dev_keys != NULL) hipFree(dev_keys); if (dev_output != NULL) hipFree(dev_output); if (dev_tstate != NULL) hipFree(dev_tstate); if (dev_ostate != NULL) hipFree(dev_ostate); if (dev_job != NULL) hipFree(dev_job); hipDeviceReset(); } static void init_test_keys(scrypt_hash *keys_in, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < SCRYPT_WIDTH; j++) { keys_in[i].b[j] = SCRYPT_WIDTH*2*i + j; } for (int j = SCRYPT_WIDTH; j < 2*SCRYPT_WIDTH; j++) { keys_in[i].bx[j-SCRYPT_WIDTH] = SCRYPT_WIDTH*2*i + j; } } } int verify_test_keys(scrypt_hash *keys_in, int n, std::string testname, int extra) { for (int i = 0; i < n; i++) { for (int j = 0; j < SCRYPT_WIDTH; j++) { if (keys_in[i].b[j] != (SCRYPT_WIDTH*2*i + j + extra)) { fprintf(stderr, "Failed %s validation of keys_in[%d].b[%d]\n", testname.c_str(), i, j); fprintf(stderr, "Got %d expected %d\n", keys_in[i].b[j], SCRYPT_WIDTH*2*i+j+extra); return -1; } } for (int j = SCRYPT_WIDTH; j < 2*SCRYPT_WIDTH; j++) { if (keys_in[i].bx[j-SCRYPT_WIDTH] != (SCRYPT_WIDTH*2*i + j + extra)) { fprintf(stderr, "Failed %s validation of keys_in[%d].bx[%d]\n", testname.c_str(), i, j-SCRYPT_WIDTH); return -1; } } } return 0; } int 
CudaHasher::TestLoadStore() {
  hipError_t error;
  int success = -1;
  scrypt_hash *keys_in = (scrypt_hash *)malloc(batchsize * sizeof(struct scrypt_hash));
  if (!keys_in) { perror("TestLoadStore: Could not allocate host keys"); goto failed; }
  init_test_keys(keys_in, batchsize);
  if (verify_test_keys(keys_in, batchsize, "pre-kernel", 0) == -1) { goto failed; }

  error = hipMemcpy(dev_keys, keys_in, sizeof(scrypt_hash) * batchsize, hipMemcpyHostToDevice);
  if (error != hipSuccess) {
    fprintf(stderr, "TestLoadStore: Could not memcpy to device, error %d\n", error);
    goto failed;
  }

  hipLaunchKernelGGL(( test_load_store_kernel), dim3(n_blocks), dim3(THREADS_PER_CUDA_BLOCK), 0, 0, dev_keys, dev_scratch);

  error = hipMemcpy(keys_in, dev_keys, batchsize * sizeof(scrypt_hash), hipMemcpyDeviceToHost);
  if (error != hipSuccess) {
    fprintf(stderr, "TestLoadStore: Could not memcpy from device, error %d\n", error);
    goto failed;
  }

  if (verify_test_keys(keys_in, batchsize, "post-kernel", 1) == -1) { goto failed; }

  fprintf(stderr, "TestLoadStore passed\n");
  success = 0;

failed:
  free(keys_in);
  return success;
}

/*
 * The simplest workhorse of our computation: takes a set of n_hashes
 * and computes the output. Note that if n_hashes is not equal to the batchsize,
 * only n_hashes will be computed. If it is less, then random hashes will be computed
 * alongside to keep the batch full. Version 1, remember?
 *
 * Returns when complete.
 *
 * Only one thread should call ComputeHashes at a time on any given device.
 */
int CudaHasher::ComputeHashes(const scrypt_hash *in, scrypt_hash *out, int n_hashes) {
  hipError_t error = hipMemcpy(dev_keys, in, sizeof(scrypt_hash) * n_hashes, hipMemcpyHostToDevice);
  if (error != hipSuccess) {
    fprintf(stderr, "Could not memcpy to device, error code %d, line(%d)\n", error, __LINE__);
    return -1;
  }

  // hasher_gen_kernel<<<n_blocks, THREADS_PER_CUDA_BLOCK>>>(dev_keys, dev_scratch);
  // hasher_hash_kernel<<<n_blocks, THREADS_PER_CUDA_BLOCK>>>(dev_keys, dev_scratch);
  hipLaunchKernelGGL(( hasher_combo_kernel), dim3(n_blocks), dim3(THREADS_PER_CUDA_BLOCK), 0, 0, dev_keys, dev_scratch);
  hipDeviceSynchronize();

  error = hipMemcpy(out, dev_keys, n_hashes * sizeof(scrypt_hash), hipMemcpyDeviceToHost);
  if (error != hipSuccess) {
    fprintf(stderr, "Could not memcpy from device, error code %d, line(%d)\n", error, __LINE__);
    return -1;
  }
  return 0;
}

int CudaHasher::ScanNCoins(uint32_t *pdata, const uint32_t *ptarget, int n_hashes,
                           volatile int *stop, unsigned long *hashes_done) {
  int n_done = 0;
  uint32_t data[20];
  uint32_t n = pdata[19]; /* sigh, cpuminer */
  memcpy(data, pdata, sizeof(data));

  scan_job j;
  memcpy(j.data, data, sizeof(data));
  memcpy(j.target, ptarget, sizeof(uint32_t)*8);

  /* Set up the job midstate once on the CPU */
  sha256_init(j.initial_midstate);
  sha256_transform(j.initial_midstate, data, 0);

  *scan_output = 0;
  hipError_t error = hipMemcpy(dev_job, &j, sizeof(scan_job), hipMemcpyHostToDevice);
  if (error != hipSuccess) {
    fprintf(stderr, "Could not memcpy job to device, error code %d, line(%d)\n", error, __LINE__);
    exit(-1);
  }
  error = hipMemcpy(dev_output, scan_output, sizeof(uint32_t), hipMemcpyHostToDevice);
  if (error != hipSuccess) {
    fprintf(stderr, "Could not memcpy output to device, error code %d, line(%d)\n", error, __LINE__);
    exit(-1);
  }

  while (n_done < n_hashes && (!*stop)) {
    data[19] = n;
    // Note: The /4 is very important below: TPCB is set for the scrypt kernel;
    // the hashing kernel uses 1 thread per hash.
static const int threads = THREADS_PER_CUDA_BLOCK; hipLaunchKernelGGL(( scrypt_hash_start_kernel), dim3(n_blocks/4), dim3(threads), 0, 0, dev_job, dev_keys, dev_ostate, dev_tstate, n); hipLaunchKernelGGL(( hasher_combo_kernel), dim3(n_blocks), dim3(threads), 0, 0, dev_keys, dev_scratch); hipLaunchKernelGGL(( scrypt_hash_finish_kernel), dim3(n_blocks/4), dim3(threads), 0, 0, dev_keys, dev_tstate, dev_ostate, dev_output, dev_job); error = hipDeviceSynchronize(); if (error != hipSuccess) { fprintf(stderr, "Kernel execution failed, error code %d, line(%d)\n", error, __LINE__); exit(-1); } error = hipMemcpy(scan_output, dev_output, sizeof(uint32_t), hipMemcpyDeviceToHost); if (error != hipSuccess) { fprintf(stderr, "Could not memcpy from device, error code %d, line(%d)\n", error, __LINE__); exit(-1); } if (*scan_output != 0) { if (hashes_done != NULL) *hashes_done += n_done; return n_done + *scan_output - 1; // -1 because of 0 blockIdx } n_done += batchsize; n += batchsize; } if (hashes_done != NULL) *hashes_done += n_done; return -1; }
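The x1/x2/x3 target-lane expressions that recur in these kernels rotate lanes within each aligned group of four threads. A small host-side sketch (illustrative only, not part of the miner) that prints the mapping for the first two groups:

#include <cstdio>

// Mirrors the device expression: (lane & 0xfc) keeps the lane's group of 4,
// ((lane & 0x03) + k) & 0x3 rotates by k within that group.
static int target_lane(int lane, int k) {
    return (lane & 0xfc) + (((lane & 0x03) + k) & 0x3);
}

int main() {
    for (int lane = 0; lane < 8; lane++)
        printf("lane %d -> x1 %d  x2 %d  x3 %d\n", lane,
               target_lane(lane, 1), target_lane(lane, 2), target_lane(lane, 3));
    return 0;
}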
90632bcbefa771a2153d5af66708242816aeb4f1.cu
/* Copyright (C) 2013 David G. Andersen. All rights reserved. * * Use of this code is covered under the Apache 2.0 license, which * can be found in the file "LICENSE" */ #include <sys/time.h> #include "hasher.h" #include "scrypt_cores.cu" /* write_keys writes the 8 keys being processed by a warp to the global * scratchpad. To effectively use memory bandwidth, it performs the writes * (and reads, for read_keys) 128 bytes at a time per memory location * by __shfl'ing the 4 entries in bx to the threads in the next-up * thread group. It then has eight threads together perform uint4 * (128 bit) writes to the destination region. This seems to make * quite effective use of memory bandwidth. An approach that spread * uint32s across more threads was slower because of the increased * computation it required. * * "start" is the loop iteration producing the write - the offset within * the block's memory. * * Internally, this algorithm first __shfl's the 4 bx entries to * the next up thread group, and then uses a conditional move to * ensure that odd-numbered thread groups exchange the b/bx ordering * so that the right parts are written together. * * Thanks to Babu for helping design the 128-bit-per-write version. * * _direct lets the caller specify the absolute start location instead of * the relative start location, as an attempt to reduce some recomputation. */ __device__ inline void write_keys_direct(const uint32_t b[4], const uint32_t bx[4], uint32_t *scratch, uint32_t start) { uint4 t, t2; t.x = b[0]; t.y = b[1]; t.z = b[2]; t.w = b[3]; int target_thread = (threadIdx.x + 4)%32; t2.x = __shfl((int)bx[0], target_thread); t2.y = __shfl((int)bx[1], target_thread); t2.z = __shfl((int)bx[2], target_thread); t2.w = __shfl((int)bx[3], target_thread); int t2_start = __shfl((int)start, target_thread) + 4; bool c = (threadIdx.x & 0x4); int loc = c ? t2_start : start; *((uint4 *)(&scratch[loc])) = (c ? t2 : t); loc = c ? start : t2_start; *((uint4 *)(&scratch[loc])) = (c ? t : t2); } __device__ inline void write_keys(const uint32_t b[4], const uint32_t bx[4], uint32_t *scratch, uint32_t start) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + (32*start) + 8*(threadIdx.x%4); write_keys_direct(b, bx, scratch, start); } inline __device__ void read_xor_keys_direct(uint32_t b[4], uint32_t bx[4], const __restrict__ uint32_t *scratch, uint32_t start) { uint4 t, t2; // Tricky bit: We do the work on behalf of thread+4, but then when // we steal, we have to steal from (thread+28)%32 to get the right // stuff back. start = __shfl((int)start, (threadIdx.x & 0x7c)) + 8*(threadIdx.x%4); int target_thread = (threadIdx.x + 4)%32; int t2_start = __shfl((int)start, target_thread) + 4; bool c = (threadIdx.x & 0x4); int loc = c ? t2_start : start; t = *((uint4 *)(&scratch[loc])); loc = c ? start : t2_start; t2 = *((uint4 *)(&scratch[loc])); uint4 tmp = t; t = (c ? t2 : t); t2 = (c ? 
tmp : t2); b[0] ^= t.x; b[1] ^= t.y; b[2] ^= t.z; b[3] ^= t.w; int steal_target = (threadIdx.x + 28)%32; bx[0] ^= __shfl((int)t2.x, steal_target); bx[1] ^= __shfl((int)t2.y, steal_target); bx[2] ^= __shfl((int)t2.z, steal_target); bx[3] ^= __shfl((int)t2.w, steal_target); } inline __device__ void read_xor_keys(uint32_t b[4], uint32_t bx[4], const __restrict__ uint32_t *scratch, uint32_t start) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + (32*start); read_xor_keys_direct(b, bx, scratch, start); } inline __device__ void primary_order_shuffle(uint32_t b[4], uint32_t bx[4]) { /* Inner loop shuffle targets */ int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3); int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3); int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3); b[3] = __shfl((int)b[3], x1_target_lane); b[2] = __shfl((int)b[2], x2_target_lane); b[1] = __shfl((int)b[1], x3_target_lane); uint32_t tmp = b[1]; b[1] = b[3]; b[3] = tmp; bx[3] = __shfl((int)bx[3], x1_target_lane); bx[2] = __shfl((int)bx[2], x2_target_lane); bx[1] = __shfl((int)bx[1], x3_target_lane); tmp = bx[1]; bx[1] = bx[3]; bx[3] = tmp; } /* * load_key loads a 32*32bit key from a contiguous region of memory in B. * The input keys are in external order (i.e., 0, 1, 2, 3, ...). * After loading, each thread has its four b and four bx keys stored * in internal processing order. */ inline __device__ void load_key(const uint32_t *B, uint32_t b[4], uint32_t bx[4]) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; // Read in permuted order. Key loads are not our bottleneck right now. for (int i = 0; i < 4; i++) { b[i] = B[key_offset + 4*thread_in_block + (thread_in_block+i)%4]; bx[i] = B[key_offset + 4*thread_in_block + (thread_in_block+i)%4 + 16]; } primary_order_shuffle(b, bx); } /* * store_key performs the opposite transform as load_key, taking * internally-ordered b and bx and storing them into a contiguous * region of B in external order. */ inline __device__ void store_key(uint32_t *B, uint32_t b[4], uint32_t bx[4]) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; primary_order_shuffle(b, bx); for (int i = 0; i < 4; i++) { B[key_offset + 4*thread_in_block + (thread_in_block+i)%4] = b[i]; B[key_offset + 4*thread_in_block + (thread_in_block+i)%4 + 16] = bx[i]; } } /* * salsa_xor_core does the equivalent of the xor_salsa8 loop from * tarsnap's implementation of scrypt. The original scrypt called: * xor_salsa8(&X[0], &X[16]); <-- the "b" loop * xor_salsa8(&X[16], &X[0]); <-- the "bx" loop * This version is unrolled to handle both of these loops in a single * call to avoid unnecessary data movement. 
*/ inline __device__ void salsa_xor_core(uint32_t b[4], uint32_t bx[4], uint32_t x[4], const int x1_target_lane, const int x2_target_lane, const int x3_target_lane) { uint32_t tmp; #pragma unroll for (int i = 0; i < 4; i++) { b[i] ^= bx[i]; x[i] = b[i]; } #define XOR_ROTATE_ADD(dst, s1, s2, amt) do { tmp = x[s1]+x[s2]; x[dst] ^= ((tmp<<amt)|(tmp>>(32-amt))); } while(0) // Enter in "column" mode (t0 has 0, 4, 8, 12) for (int j = 0; j < 4; j++) { // Mixing phase of salsa XOR_ROTATE_ADD(1, 0, 3, 7); XOR_ROTATE_ADD(2, 1, 0, 9); XOR_ROTATE_ADD(3, 2, 1, 13); XOR_ROTATE_ADD(0, 3, 2, 18); /* Transpose rows and columns. */ /* Unclear if this optimization is needed: These are ordered based * upon the dependencies needed in the later xors. Compiler should be * able to figure this out, but might as well give it a hand. */ x[1] = __shfl((int)x[1], x3_target_lane); x[3] = __shfl((int)x[3], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); /* The next XOR_ROTATE_ADDS could be written to be a copy-paste of the first, * but the register targets are rewritten here to swap x[1] and x[3] so that * they can be directly shuffled to and from our peer threads without * reassignment. The reverse shuffle then puts them back in the right place. */ XOR_ROTATE_ADD(3, 0, 1, 7); XOR_ROTATE_ADD(2, 3, 0, 9); XOR_ROTATE_ADD(1, 2, 3, 13); XOR_ROTATE_ADD(0, 1, 2, 18); x[3] = __shfl((int)x[3], x3_target_lane); x[1] = __shfl((int)x[1], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); } for (int i = 0; i < 4; i++) { b[i] += x[i]; // The next two lines are the beginning of the BX-centric loop iteration bx[i] ^= b[i]; x[i] = bx[i]; } // This is a copy of the same loop above, identical but stripped of comments. // Duplicated so that we can complete a bx-based loop with fewer register moves. for (int j = 0; j < 4; j++) { XOR_ROTATE_ADD(1, 0, 3, 7); XOR_ROTATE_ADD(2, 1, 0, 9); XOR_ROTATE_ADD(3, 2, 1, 13); XOR_ROTATE_ADD(0, 3, 2, 18); x[1] = __shfl((int)x[1], x3_target_lane); x[3] = __shfl((int)x[3], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); XOR_ROTATE_ADD(3, 0, 1, 7); XOR_ROTATE_ADD(2, 3, 0, 9); XOR_ROTATE_ADD(1, 2, 3, 13); XOR_ROTATE_ADD(0, 1, 2, 18); x[3] = __shfl((int)x[3], x3_target_lane); x[1] = __shfl((int)x[1], x1_target_lane); x[2] = __shfl((int)x[2], x2_target_lane); } // At the end of these iterations, the data is in primary order again. #undef XOR_ROTATE_ADD for (int i = 0; i < 4; i++) { bx[i] += x[i]; } } /* * The hasher_gen_kernel operates on a group of 1024-bit input keys * in B, stored as: * B = { k1B k1Bx k2B k2Bx ... } * and fills up the scratchpad with the iterative hashes derived from * those keys: * scratch { k1h1B k1h1Bx K1h2B K1h2Bx ... K2h1B K2h1Bx K2h2B K2h2Bx ... } * scratch is 1024 times larger than the input keys B. * It is extremely important to stream writes effectively into scratch; * less important to coalesce the reads from B. * * Key ordering note: Keys are input from B in "original" order: * K = {k1, k2, k3, k4, k5, ..., kx15, kx16, kx17, ..., kx31 } * After inputting into kernel_gen, each component k and kx of the * key is transmuted into a permuted internal order to make processing faster: * K = k, kx with: * k = 0, 4, 8, 12, 5, 9, 13, 1, 10, 14, 2, 6, 15, 3, 7, 11 * and similarly for kx. */ __global__ void hasher_gen_kernel(__restrict__ uint32_t *B, __restrict__ uint32_t *scratch) { /* Each thread operates on four of the sixteen B and Bx variables. Thus, * each key is processed by four threads in parallel. 
salsa_scrypt_core
 * internally shuffles the variables between threads (and back) as
 * needed. */

  uint32_t b[4], bx[4], x[4];
  load_key(B, b, bx);

  /* Inner loop shuffle targets */
  int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3);
  int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3);
  int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3);

  int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK;
  int start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + 8*(threadIdx.x%4);

  for (int i = 0; i < 1024; i++) {
    write_keys_direct(b, bx, scratch, start+32*i);
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }

  store_key(B, b, bx);
}

/*
 * hasher_hash_kernel runs the second phase of scrypt after the scratch
 * buffer is filled with the iterative hashes: It bounces through
 * the scratch buffer in pseudorandom order, mixing the key as it goes.
 */
__global__ void hasher_hash_kernel(__restrict__ uint32_t *B, const __restrict__ uint32_t *scratch) {
  /* Each thread operates on a group of four variables that must be processed
   * together. Shuffle between threads in a warp between iterations.
   */
  uint32_t b[4], bx[4], x[4];
  load_key(B, b, bx);

  /* Inner loop shuffle targets */
  int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3);
  int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3);
  int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3);

  for (int i = 0; i < 1024; i++) {
    // Bounce through the key space and XOR the new keys in.
    // Critical thing: (X[16] & 1023) tells us the next slot to read.
    // X[16] in the original is bx[0]
    int slot = bx[0] & 1023;
    read_xor_keys(b, bx, scratch, slot);
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }
  store_key(B, b, bx);
}

/*
 * hasher_combo_kernel runs the functions of both hasher_gen_kernel
 * and hasher_hash_kernel in a single invocation. It is
 * designed to reduce kernel launch downtime a bit and omit one
 * intermediate store_key operation to global memory. This is faster on
 * my GT 550m, but seems a bit slower on a Tesla, probably because one of
 * the two individual kernels can use fewer registers alone.
 */
__global__ void hasher_combo_kernel(__restrict__ uint32_t *B, __restrict__ uint32_t *scratch) {
  uint32_t b[4], bx[4], x[4];
  load_key(B, b, bx);

  int x1_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+1)&0x3);
  int x2_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+2)&0x3);
  int x3_target_lane = (threadIdx.x & 0xfc) + (((threadIdx.x & 0x03)+3)&0x3);

  int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_SCRYPT_BLOCK;
  int start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK + 8*(threadIdx.x%4);

  for (int i = 0; i < 1024; i++) {
    write_keys_direct(b, bx, scratch, start);
    start += 32;
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }

  start = scrypt_block*SCRYPT_SCRATCH_PER_BLOCK;

  for (int i = 0; i < 1024; i++) {
    int slot = bx[0] & 1023;
    read_xor_keys_direct(b, bx, scratch, start+32*slot);
    salsa_xor_core(b, bx, x, x1_target_lane, x2_target_lane, x3_target_lane);
  }

  store_key(B, b, bx);
}

/*
 * scrypt_hash_start_kernel takes a job description (job) and a starting nonce number (n_base)
 * and generates (batchsize) starting hashes using HMAC_SHA256 and PBKDF2_SHA256.
 * This is the first step in scrypt computation before the salsa core executions
 * that introduce the memory difficulty.
* * This function stores three outputs: * output - the 1024-bit intermediate state fed to scrypt_core * tstate, ostate - intermediate PBKDF2 state that needs to be used again * after the execution of scrypt_core. */ __global__ void scrypt_hash_start_kernel(const __restrict__ scan_job *job, __restrict__ uint32_t *output, __restrict__ uint32_t *ostate_out, __restrict__ uint32_t *tstate_out, uint32_t n_base) { uint32_t tstate[8]; uint32_t ostate[8]; uint32_t data[20]; int blockid = (blockIdx.x*blockDim.x + threadIdx.x); /* data: the input. * tstate, ostate, output must have sufficient space * -> batchsize * 8 * sizeof(uint32_t) */ /* Trivial implementation: Each thread processes one key. This is lame, but * PBKDF related processing is only about 3.5% of runtime right now. */ const uint32_t *in_data = job->data; uint64_t blockstart = blockid*32; { uint4 d; for (int i = 0; i < 20; i+= 4) { d = *(uint4 *)&in_data[i]; data[i] = d.x; data[i+1] = d.y; data[i+2] = d.z; data[i+3] = d.w; } } data[19] = n_base + blockid; read_8_as_uint4(job->initial_midstate, tstate); dev_HMAC_SHA256_80_init(data, tstate, ostate); /* This writes directly to output and does no shuffling or cleverness * to coalesce writes. Unnecessary memory transactions, but not worth * fixing yet - PBKDF is less than 4% of runtime overall still. */ dev_PBKDF2_SHA256_80_128(tstate, ostate, data, &output[blockstart]); /* Write out (and read back) tstate and ostate interleaved across threads to improve mem b/w. Easy change, though small benefit. */ int bigblockstart = (blockIdx.x*blockDim.x)*8; int threadsInBlock = blockDim.x; for (int i = 0; i < 8; i++) { tstate_out[bigblockstart + threadsInBlock*i + threadIdx.x ] = tstate[i]; ostate_out[bigblockstart + threadsInBlock*i + threadIdx.x ] = ostate[i]; } } /* * scrypt_hash_finish_kernel takes the output state from scrypt_core and * recombines it with the saved PBKDF2 tstate and ostate from * scrypt_hash_start_kernel to produce an output key. * * It then compares the output key ("hash") to the target number * specified in the job to determine whether hash < job->target. * If it is, it puts its block/thread ID + 1 into *dev_output. * If it is not, it does nothing. The caller should ensure that * dev_output is zero before the call to scrypt_hash_finish_kernel, and * should subtract one from non-zero output to determine the * actual thread ID (and thus, the nonce used for hashing). * * This method does not guarantee that the lowest-numbered or * smallest acceptable output is returned, merely that one satisfying * output is returned if any exist. 
*/ __global__ void scrypt_hash_finish_kernel(const __restrict__ uint32_t *dev_keys, __restrict__ uint32_t *dev_tstate, __restrict__ uint32_t *dev_ostate, __restrict__ uint32_t *dev_output, __restrict__ const scan_job *job) { uint32_t tstate[8]; uint32_t ostate[8]; uint32_t hash[32]; int blockid = (blockIdx.x*blockDim.x + threadIdx.x); int bigblockstart = blockIdx.x*blockDim.x*8; int threadsInBlock = blockDim.x; /* As in start_kernel, reads tstate/ostate interleaved between threads */ for (int i = 0; i < 8; i++) { tstate[i] = dev_tstate[bigblockstart + threadsInBlock*i + threadIdx.x ]; ostate[i] = dev_ostate[bigblockstart + threadsInBlock*i + threadIdx.x ]; } uint64_t blockstart = blockid*32; uint4 t; for (int i = 0; i < 32; i+= 4) { t = *(uint4 *)&dev_keys[blockstart+i]; hash[i] = t.x; hash[i+1] = t.y; hash[i+2] = t.z; hash[i+3] = t.w; } dev_PBKDF2_SHA256_128_32(tstate, ostate, hash); uint32_t foundit = 0x00000000; uint32_t maybe = 0xffffffff; uint32_t target[8]; read_8_as_uint4(job->target, target); for (int j = 7; j >= 0; j--) { uint32_t tmp = swab32(ostate[j]); maybe = (maybe & (tmp <= target[j])); foundit = (foundit | (maybe & (tmp < target[j]))); } foundit = foundit ? (blockid+1) : foundit; if (foundit) { // Finding the lowest doesn't matter. Just let the first writer win. uint32_t oldval = atomicCAS(&dev_output[0], 0, foundit); } } /* Unit test kernel to expose loading, writing, reading, and storing keys */ __global__ void test_load_store_kernel(__restrict__ uint32_t *B, __restrict__ uint32_t *scratch) { uint32_t b[4], bx[4]; load_key(B, b, bx); for (int i = 0; i < 4; i++) { b[i]++; bx[i]++; } for (int slot = 0; slot < 1024; slot++) { write_keys(b, bx, scratch, slot); for (int i = 0; i < 4; i++) { b[i] = bx[i] = 0; } read_xor_keys(b, bx, scratch, slot); } store_key(B, b, bx); } /* * The CudaHasher constructor does nothing. * You must call Initialize() and check the error code. */ CudaHasher::CudaHasher() : dev_keys(NULL), dev_scratch(NULL), dev_output(NULL), dev_tstate(NULL), dev_ostate(NULL), scan_output(NULL), dev_job(NULL) { // dev_keys = NULL; // dev_scratch = NULL; } /* * Initialize() sets up the GPU state. * Between creation and destruction, a lot of memory * may be eaten on the GPU. For performance, it is * _not_ freed when ComputeHashes is not running. * ergo: If you're going to be idle for a long time * and want to be nice to other uses of the GPU, destroy * the CudaHasher and create a new one. * * If initialize fails, destroy the object to reset the GPU. */ int CudaHasher::Initialize() { cudaError_t error; /* Stop eating CPU while waiting for results! */ error = cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); if (error != cudaSuccess) { fprintf(stderr, "Could not set blocking sync (error %d)\n", error); } /* Determine device memory to size batch */ size_t free, total; cudaMemGetInfo(&free, &total); //printf("Initializing. Device has %ld free of %ld total bytes of memory\n", free, total); int mem_per_job = 160000; /* A little conservative */ int max_batchsize = free / mem_per_job; /* We need 4 threads per work unit and each block should have 192 threads */ /* The number of blocks should also probably be a multiple of the * number of multiprocessors. 
I'm using 8 here as a simple way * to get a pretty-OK answer that's probably not optimal for big GPUs */ batchsize = (max_batchsize/(2*THREADS_PER_CUDA_BLOCK))*2*THREADS_PER_CUDA_BLOCK; n_blocks = (batchsize*THREADS_PER_SCRYPT_BLOCK/THREADS_PER_CUDA_BLOCK); error = cudaMalloc((void **) &dev_job, sizeof(scan_job)); if (error != cudaSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_job = NULL; return -1; } error = cudaMalloc((void **) &dev_keys, sizeof(scrypt_hash) * batchsize); if (error != cudaSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_keys = NULL; return -1; } size_t scratchBufSize = sizeof(scrypt_hash)*1024*batchsize; error = cudaMalloc((void **) &dev_scratch, scratchBufSize); if (error != cudaSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_scratch = NULL; return -1; } /* dev_output holds one int per warp indicating if a thread in that warp solved the block */ error = cudaMalloc((void **) &dev_output, sizeof(uint32_t)); if (error != cudaSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_output = NULL; return -1; } error = cudaMalloc((void **) &dev_tstate, 8*sizeof(uint32_t) * batchsize); if (error != cudaSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_tstate = NULL; return -1; } error = cudaMalloc((void **) &dev_ostate, 8 * sizeof(uint32_t) * batchsize); if (error != cudaSuccess) { fprintf(stderr, "Could not allocate CUDA array, error code %d, line(%d)\n", error, __LINE__); dev_ostate = NULL; return -1; } scan_output = (uint32_t *)malloc(sizeof(uint32_t)); if (!scan_output) { perror("Could not allocate scan output buffer (host)"); return -1; } cudaFuncSetCacheConfig(hasher_hash_kernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(hasher_gen_kernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(hasher_combo_kernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(scrypt_hash_start_kernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(scrypt_hash_finish_kernel, cudaFuncCachePreferL1); return 0; } CudaHasher::~CudaHasher() { /* Free host memory */ if (scan_output) free(scan_output); /* Free device memory */ if (dev_scratch != NULL) cudaFree(dev_scratch); if (dev_keys != NULL) cudaFree(dev_keys); if (dev_output != NULL) cudaFree(dev_output); if (dev_tstate != NULL) cudaFree(dev_tstate); if (dev_ostate != NULL) cudaFree(dev_ostate); if (dev_job != NULL) cudaFree(dev_job); cudaDeviceReset(); } static void init_test_keys(scrypt_hash *keys_in, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < SCRYPT_WIDTH; j++) { keys_in[i].b[j] = SCRYPT_WIDTH*2*i + j; } for (int j = SCRYPT_WIDTH; j < 2*SCRYPT_WIDTH; j++) { keys_in[i].bx[j-SCRYPT_WIDTH] = SCRYPT_WIDTH*2*i + j; } } } int verify_test_keys(scrypt_hash *keys_in, int n, std::string testname, int extra) { for (int i = 0; i < n; i++) { for (int j = 0; j < SCRYPT_WIDTH; j++) { if (keys_in[i].b[j] != (SCRYPT_WIDTH*2*i + j + extra)) { fprintf(stderr, "Failed %s validation of keys_in[%d].b[%d]\n", testname.c_str(), i, j); fprintf(stderr, "Got %d expected %d\n", keys_in[i].b[j], SCRYPT_WIDTH*2*i+j+extra); return -1; } } for (int j = SCRYPT_WIDTH; j < 2*SCRYPT_WIDTH; j++) { if (keys_in[i].bx[j-SCRYPT_WIDTH] != (SCRYPT_WIDTH*2*i + j + extra)) { fprintf(stderr, "Failed %s validation of keys_in[%d].bx[%d]\n", testname.c_str(), i, j-SCRYPT_WIDTH); 
      return -1;
      }
    }
  }
  return 0;
}

int CudaHasher::TestLoadStore() {
  cudaError error;
  int success = -1;
  scrypt_hash *keys_in = (scrypt_hash *)malloc(batchsize * sizeof(struct scrypt_hash));
  if (!keys_in) { perror("TestLoadStore: Could not allocate host keys"); goto failed; }
  init_test_keys(keys_in, batchsize);
  if (verify_test_keys(keys_in, batchsize, "pre-kernel", 0) == -1) { goto failed; }

  error = cudaMemcpy(dev_keys, keys_in, sizeof(scrypt_hash) * batchsize, cudaMemcpyHostToDevice);
  if (error != cudaSuccess) {
    fprintf(stderr, "TestLoadStore: Could not memcpy to device, error %d\n", error);
    goto failed;
  }

  test_load_store_kernel<<<n_blocks, THREADS_PER_CUDA_BLOCK>>>(dev_keys, dev_scratch);

  error = cudaMemcpy(keys_in, dev_keys, batchsize * sizeof(scrypt_hash), cudaMemcpyDeviceToHost);
  if (error != cudaSuccess) {
    fprintf(stderr, "TestLoadStore: Could not memcpy from device, error %d\n", error);
    goto failed;
  }

  if (verify_test_keys(keys_in, batchsize, "post-kernel", 1) == -1) { goto failed; }

  fprintf(stderr, "TestLoadStore passed\n");
  success = 0;

failed:
  free(keys_in);
  return success;
}

/*
 * The simplest workhorse of our computation: takes a set of n_hashes
 * and computes the output. Note that if n_hashes is not equal to the batchsize,
 * only n_hashes will be computed. If it is less, then random hashes will be computed
 * alongside to keep the batch full. Version 1, remember?
 *
 * Returns when complete.
 *
 * Only one thread should call ComputeHashes at a time on any given device.
 */
int CudaHasher::ComputeHashes(const scrypt_hash *in, scrypt_hash *out, int n_hashes) {
  cudaError error = cudaMemcpy(dev_keys, in, sizeof(scrypt_hash) * n_hashes, cudaMemcpyHostToDevice);
  if (error != cudaSuccess) {
    fprintf(stderr, "Could not memcpy to device, error code %d, line(%d)\n", error, __LINE__);
    return -1;
  }

  // hasher_gen_kernel<<<n_blocks, THREADS_PER_CUDA_BLOCK>>>(dev_keys, dev_scratch);
  // hasher_hash_kernel<<<n_blocks, THREADS_PER_CUDA_BLOCK>>>(dev_keys, dev_scratch);
  hasher_combo_kernel<<<n_blocks, THREADS_PER_CUDA_BLOCK>>>(dev_keys, dev_scratch);
  cudaDeviceSynchronize();

  error = cudaMemcpy(out, dev_keys, n_hashes * sizeof(scrypt_hash), cudaMemcpyDeviceToHost);
  if (error != cudaSuccess) {
    fprintf(stderr, "Could not memcpy from device, error code %d, line(%d)\n", error, __LINE__);
    return -1;
  }
  return 0;
}

int CudaHasher::ScanNCoins(uint32_t *pdata, const uint32_t *ptarget, int n_hashes,
                           volatile int *stop, unsigned long *hashes_done) {
  int n_done = 0;
  uint32_t data[20];
  uint32_t n = pdata[19]; /* sigh, cpuminer */
  memcpy(data, pdata, sizeof(data));

  scan_job j;
  memcpy(j.data, data, sizeof(data));
  memcpy(j.target, ptarget, sizeof(uint32_t)*8);

  /* Set up the job midstate once on the CPU */
  sha256_init(j.initial_midstate);
  sha256_transform(j.initial_midstate, data, 0);

  *scan_output = 0;
  cudaError error = cudaMemcpy(dev_job, &j, sizeof(scan_job), cudaMemcpyHostToDevice);
  if (error != cudaSuccess) {
    fprintf(stderr, "Could not memcpy job to device, error code %d, line(%d)\n", error, __LINE__);
    exit(-1);
  }
  error = cudaMemcpy(dev_output, scan_output, sizeof(uint32_t), cudaMemcpyHostToDevice);
  if (error != cudaSuccess) {
    fprintf(stderr, "Could not memcpy output to device, error code %d, line(%d)\n", error, __LINE__);
    exit(-1);
  }

  while (n_done < n_hashes && (!*stop)) {
    data[19] = n;
    // Note: The /4 is very important below: TPCB is set for the scrypt kernel;
    // the hashing kernel uses 1 thread per hash.
static const int threads = THREADS_PER_CUDA_BLOCK; scrypt_hash_start_kernel<<<n_blocks/4, threads>>>(dev_job, dev_keys, dev_ostate, dev_tstate, n); hasher_combo_kernel<<<n_blocks, threads>>>(dev_keys, dev_scratch); scrypt_hash_finish_kernel<<<n_blocks/4, threads>>>(dev_keys, dev_tstate, dev_ostate, dev_output, dev_job); error = cudaDeviceSynchronize(); if (error != cudaSuccess) { fprintf(stderr, "Kernel execution failed, error code %d, line(%d)\n", error, __LINE__); exit(-1); } error = cudaMemcpy(scan_output, dev_output, sizeof(uint32_t), cudaMemcpyDeviceToHost); if (error != cudaSuccess) { fprintf(stderr, "Could not memcpy from device, error code %d, line(%d)\n", error, __LINE__); exit(-1); } if (*scan_output != 0) { if (hashes_done != NULL) *hashes_done += n_done; return n_done + *scan_output - 1; // -1 because of 0 blockIdx } n_done += batchsize; n += batchsize; } if (hashes_done != NULL) *hashes_done += n_done; return -1; }
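For reference, a minimal host-side driver for the scanning path might look like the sketch below. The work-array layout (20-word data, 8-word target, nonce in pdata[19]) follows the cpuminer conventions already visible in ScanNCoins; the variable names and the hash count are illustrative, and `hasher` is assumed to point at an already-initialized CudaHasher.

/* assumes `hasher` points at an already-initialized CudaHasher */
volatile int stop = 0;
unsigned long hashes_done = 0;
uint32_t pdata[20], ptarget[8];
/* ... fill pdata and ptarget with the current work; pdata[19] is the start nonce ... */
int offset = hasher->ScanNCoins(pdata, ptarget, 1 << 20, &stop, &hashes_done);
if (offset >= 0) {
    pdata[19] += offset;  /* the return value is the offset from the start nonce */
    /* ... submit the share ... */
}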
318b0ab4568c8b31db3ab74bd4f1376eaa7e3bad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cmath> #define CUDA_CHECK_RETURN(value) {\ hipError_t _m_cudaStat = value;\ if (_m_cudaStat != hipSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n", hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\ exit(1);\ }\ } using namespace std; __global__ void matrixInitByX(float *matrix) { auto x = threadIdx.x + blockIdx.x * blockDim.x; auto y = threadIdx.y + blockIdx.y * blockDim.y; auto N = blockDim.x * gridDim.x; matrix[x + y * N] = (float) (x + y * N); } __global__ void matrixInitByY(float matrix[]) { auto y = threadIdx.x + blockIdx.x * blockDim.x; auto x = threadIdx.y + blockIdx.y * blockDim.y; auto N = blockDim.x * gridDim.x; matrix[x + y * N] = (float) (x + y * N); } __global__ void matrixTranspose(const float storage_d[], float storage_d_t[]) { auto i = threadIdx.x + blockIdx.x * blockDim.x; auto j = threadIdx.y + blockIdx.y * blockDim.y; auto N = blockDim.x * gridDim.x; storage_d_t[j + i * N] = storage_d[i + j * N]; } int main() { auto N = 1u << 8u; auto threads = 32; auto blocks = N / threads; float *matrix, *t_matrix; // the launch covers N x N threads and the kernels index matrix[x + y * N], so each buffer needs N * N floats CUDA_CHECK_RETURN(hipMallocManaged(&matrix, N * N * sizeof(float))) CUDA_CHECK_RETURN(hipMallocManaged(&t_matrix, N * N * sizeof(float))) hipLaunchKernelGGL(( matrixInitByY), dim3(dim3(blocks, blocks)), dim3(dim3(threads, threads)), 0, 0, matrix); hipDeviceSynchronize(); CUDA_CHECK_RETURN(hipGetLastError()) // matrixTranspose<<<dim3(blocks, blocks), dim3(threads, threads)>>>(matrix, t_matrix); // hipDeviceSynchronize(); // CUDA_CHECK_RETURN(hipGetLastError()) // int side = static_cast<int>(sqrt(N)); // for (int i = 0; i < side; i++) { // for (int j = 0; j < side; j++) { // cout << setw(3) << matrix[i * side + j] << ' '; // } // cout << '\n'; // } // cout << '\n'; // for (int i = 0; i < side; i++) { // for (int j = 0; j < side; j++) { // cout << setw(3) << t_matrix[i * side + j] << ' '; // } // cout << '\n'; // } }
318b0ab4568c8b31db3ab74bd4f1376eaa7e3bad.cu
#include <iostream> #include <cmath> #define CUDA_CHECK_RETURN(value) {\ cudaError_t _m_cudaStat = value;\ if (_m_cudaStat != cudaSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\ exit(1);\ }\ } using namespace std; __global__ void matrixInitByX(float *matrix) { auto x = threadIdx.x + blockIdx.x * blockDim.x; auto y = threadIdx.y + blockIdx.y * blockDim.y; auto N = blockDim.x * gridDim.x; matrix[x + y * N] = (float) (x + y * N); } __global__ void matrixInitByY(float matrix[]) { auto y = threadIdx.x + blockIdx.x * blockDim.x; auto x = threadIdx.y + blockIdx.y * blockDim.y; auto N = blockDim.x * gridDim.x; matrix[x + y * N] = (float) (x + y * N); } __global__ void matrixTranspose(const float storage_d[], float storage_d_t[]) { auto i = threadIdx.x + blockIdx.x * blockDim.x; auto j = threadIdx.y + blockIdx.y * blockDim.y; auto N = blockDim.x * gridDim.x; storage_d_t[j + i * N] = storage_d[i + j * N]; } int main() { auto N = 1u << 8u; auto threads = 32; auto blocks = N / threads; float *matrix, *t_matrix; // the launch covers N x N threads and the kernels index matrix[x + y * N], so each buffer needs N * N floats CUDA_CHECK_RETURN(cudaMallocManaged(&matrix, N * N * sizeof(float))) CUDA_CHECK_RETURN(cudaMallocManaged(&t_matrix, N * N * sizeof(float))) matrixInitByY<<<dim3(blocks, blocks), dim3(threads, threads)>>>(matrix); cudaDeviceSynchronize(); CUDA_CHECK_RETURN(cudaGetLastError()) // matrixTranspose<<<dim3(blocks, blocks), dim3(threads, threads)>>>(matrix, t_matrix); // cudaDeviceSynchronize(); // CUDA_CHECK_RETURN(cudaGetLastError()) // int side = static_cast<int>(sqrt(N)); // for (int i = 0; i < side; i++) { // for (int j = 0; j < side; j++) { // cout << setw(3) << matrix[i * side + j] << ' '; // } // cout << '\n'; // } // cout << '\n'; // for (int i = 0; i < side; i++) { // for (int j = 0; j < side; j++) { // cout << setw(3) << t_matrix[i * side + j] << ' '; // } // cout << '\n'; // } }
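Since both buffers are managed memory, a host-side check of the transpose kernel is straightforward once the device has synchronized; a minimal sketch using the names from the file above:

matrixTranspose<<<dim3(blocks, blocks), dim3(threads, threads)>>>(matrix, t_matrix);
CUDA_CHECK_RETURN(cudaDeviceSynchronize())
// the kernel writes t[j + i * N] = m[i + j * N], so verify exactly that mapping
for (unsigned i = 0; i < N; i++)
    for (unsigned j = 0; j < N; j++)
        if (t_matrix[j + i * N] != matrix[i + j * N])
            fprintf(stderr, "transpose mismatch at (%u, %u)\n", i, j);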
41098dedd72cb50bb1679115c524c7525c3f8f06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<math.h> #include<sys/time.h> #define CUDA_ERROR_EXIT(str) do{\ hipError_t err = hipGetLastError();\ if( err != hipSuccess){\ printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\ exit(-1);\ }\ }while(0); #define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec)) #define USAGE_EXIT(s) do{ \ printf("Usage: %s <# of elements> <# of threads> \n %s\n", argv[0], s); \ exit(-1);\ }while(0); /* In-place XOR tree reduction. NOTE: __syncthreads() only synchronizes threads within one block, so this pass is only correct when the whole array fits in a single block (num_elements <= 1024 with the launch below); the parameter l is unused, since the loop terminates once p reaches 0. */ __global__ void xorsum(int num_elements,int *a,double l) { int i = blockDim.x * blockIdx.x + threadIdx.x; int p=num_elements/2; int num=num_elements; while(p) { if(i<p) { a[i]=a[i]^a[num-i-1]; } __syncthreads(); num=(num+1)/2; p=num/2; } } int main(int argc, char const *argv[]) { struct timeval start, end, t_start, t_end; int num_elements; int SEED,ctr,blocks; int *gpu_mem,*a; if(argc==3) { num_elements=atoi(argv[1]); SEED=atoi(argv[2]); } else { printf("Wrong command line arguments\n" ); exit(-1); } a = (int *)malloc(num_elements * sizeof(int)); if(!a){ USAGE_EXIT("invalid num elements, not enough memory"); } srand(SEED); for(ctr=0; ctr<num_elements; ++ctr) { a[ctr] = random(); } for (size_t i = 0; i < num_elements; i++) { printf("%d\n",a[i] ); if (i==num_elements-1) { printf("\n" ); } } gettimeofday(&t_start,NULL); hipMalloc(&gpu_mem, num_elements*sizeof(int)); CUDA_ERROR_EXIT("hipMalloc"); hipMemcpy(gpu_mem,a, num_elements*sizeof(int) , hipMemcpyHostToDevice); CUDA_ERROR_EXIT("hipMemcpy"); gettimeofday(&start, NULL); blocks = num_elements /1024; if(num_elements % 1024) { ++blocks; } double l=log(num_elements)/log(2); hipLaunchKernelGGL(( xorsum), dim3(blocks), dim3(1024), 0, 0, num_elements,gpu_mem,l); CUDA_ERROR_EXIT("kernel invocation"); gettimeofday(&end, NULL); /* Copy back result*/ hipMemcpy(a, gpu_mem,num_elements*sizeof(int),hipMemcpyDeviceToHost); CUDA_ERROR_EXIT("memcpy"); gettimeofday(&t_end, NULL); printf("Xor based checksum is %d\n",a[0]); printf("Total time = %ld microsecs Processing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end)); hipFree(gpu_mem); return 0; }
41098dedd72cb50bb1679115c524c7525c3f8f06.cu
#include<stdio.h> #include<stdlib.h> #include<math.h> #include<sys/time.h> #define CUDA_ERROR_EXIT(str) do{\ cudaError err = cudaGetLastError();\ if( err != cudaSuccess){\ printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\ exit(-1);\ }\ }while(0); #define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec)) #define USAGE_EXIT(s) do{ \ printf("Usage: %s <# of elements> <# of threads> \n %s\n", argv[0], s); \ exit(-1);\ }while(0); /* In-place XOR tree reduction. NOTE: __syncthreads() only synchronizes threads within one block, so this pass is only correct when the whole array fits in a single block (num_elements <= 1024 with the launch below); the parameter l is unused, since the loop terminates once p reaches 0. */ __global__ void xorsum(int num_elements,int *a,double l) { int i = blockDim.x * blockIdx.x + threadIdx.x; int p=num_elements/2; int num=num_elements; while(p) { if(i<p) { a[i]=a[i]^a[num-i-1]; } __syncthreads(); num=(num+1)/2; p=num/2; } } int main(int argc, char const *argv[]) { struct timeval start, end, t_start, t_end; int num_elements; int SEED,ctr,blocks; int *gpu_mem,*a; if(argc==3) { num_elements=atoi(argv[1]); SEED=atoi(argv[2]); } else { printf("Wrong command line arguments\n" ); exit(-1); } a = (int *)malloc(num_elements * sizeof(int)); if(!a){ USAGE_EXIT("invalid num elements, not enough memory"); } srand(SEED); for(ctr=0; ctr<num_elements; ++ctr) { a[ctr] = random(); } for (size_t i = 0; i < num_elements; i++) { printf("%d\n",a[i] ); if (i==num_elements-1) { printf("\n" ); } } gettimeofday(&t_start,NULL); cudaMalloc(&gpu_mem, num_elements*sizeof(int)); CUDA_ERROR_EXIT("cudaMalloc"); cudaMemcpy(gpu_mem,a, num_elements*sizeof(int) , cudaMemcpyHostToDevice); CUDA_ERROR_EXIT("cudaMemcpy"); gettimeofday(&start, NULL); blocks = num_elements /1024; if(num_elements % 1024) { ++blocks; } double l=log(num_elements)/log(2); xorsum<<<blocks, 1024>>>(num_elements,gpu_mem,l); CUDA_ERROR_EXIT("kernel invocation"); gettimeofday(&end, NULL); /* Copy back result*/ cudaMemcpy(a, gpu_mem,num_elements*sizeof(int),cudaMemcpyDeviceToHost); CUDA_ERROR_EXIT("memcpy"); gettimeofday(&t_end, NULL); printf("Xor based checksum is %d\n",a[0]); printf("Total time = %ld microsecs Processing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end)); cudaFree(gpu_mem); return 0; }
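The in-kernel tree pass above cannot be made safe across blocks, because there is no grid-wide barrier inside a single launch of this form. A grid-safe alternative, sketched below under the assumptions of a zero-initialized global accumulator and a power-of-two block size, reduces each block in shared memory and combines blocks with atomicXor:

// Grid-safe XOR checksum: per-block shared-memory reduction, then one
// atomicXor per block into a zero-initialized accumulator `out`.
__global__ void xorsum_atomic(int n, const int *a, int *out) {
    __shared__ int sdata[1024];
    int tid = threadIdx.x;
    int v = 0;
    // grid-stride pass: each thread folds a strided slice of the input
    for (int i = blockIdx.x * blockDim.x + tid; i < n; i += blockDim.x * gridDim.x)
        v ^= a[i];
    sdata[tid] = v;
    __syncthreads();
    // power-of-two reduction within the block
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] ^= sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) atomicXor(out, sdata[0]);  // one combine per block
}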
522a96ac07f8ed095320ee827c7a362e74571eaf.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, false>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
522a96ac07f8ed095320ee827c7a362e74571eaf.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, false>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
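As a back-of-the-envelope check on the shapes instantiated above (this arithmetic is an inference from the usual CUTLASS tiling scheme, not something stated in the file): a 64×64×64 threadblock tile covered by 32×32×64 warp tiles implies (64/32) × (64/32) = 4 warps, i.e. 128 threads per threadblock.

// Tiling arithmetic implied by the ThreadBlockShape/WarpShape pair above
constexpr int kWarpsM = 64 / 32;
constexpr int kWarpsN = 64 / 32;
constexpr int kThreadsPerBlock = kWarpsM * kWarpsN * 32;
static_assert(kThreadsPerBlock == 128, "4 warps of 32 threads per CTA tile");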
9dbe2eb3164092eaca79bb79f89c380f7c9c630c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "grouping.h" #define htod hipMemcpyHostToDevice #define dtoh hipMemcpyDeviceToHost void array_ordering(int instances, vector<int> &row,vector<int> &col,vector<float> &w,int *h_row,int* h_col, float *h_w){ for(int i = 0; i < instances; i++){ h_row[i] = row[i]; h_col[i] = col[i]; h_w[i] = w[i]; } } void grouping(vector<vector<int> > &row_g, vector<vector<int> > &col_g,vector<vector<float> > &w_g,int m, int n, int instances, vector<int> &row,vector<int> &col,vector<float> &w){ for(int i = 0; i < instances; i++){ int _row = row[i]; int _col = col[i]; float _w = w[i]; /* the weights are floats; an int here would silently truncate them */ int index = _row - _col; if(index < 0){ row_g[max(m,n)+index].push_back(_row); col_g[max(m,n)+index].push_back(_col); w_g[max(m,n)+index].push_back(_w); } else{ row_g[index].push_back(_row); col_g[index].push_back(_col); w_g[index].push_back(_w); } } int k = 0; for(int i = 0; i < row_g.size(); i++){ for(int j = 0; j < row_g[i].size(); j++){ row[k] = row_g[i][j]; col[k] = col_g[i][j]; w[k++] = w_g[i][j]; } } } void read(vector<int> &row,vector<int> &col,vector<float> &w,int &m, int &n, int &instances, vector<vector<int> > &test){ ifstream in; in.open("train.txt",ios_base::in); if(!in.is_open()){ cout << "error" << endl; exit(1); } //get instances, m, n string tmp; getline(in,tmp); std::istringstream is_tmp(tmp); is_tmp >> m; is_tmp >> n; is_tmp >> instances; cout << instances << endl; int count = 0; for(string line; getline(in,line); ){ std::istringstream is(line); int t1; float t2; is >> t1; row.push_back(t1-1); is >> t1; col.push_back(t1-1); is >> t2; w.push_back(t2); count++; } in.close(); in.open("test.txt",ios_base::in); if(!in.is_open()){ cout << "error" << endl; exit(1); } for(string line; getline(in,line); ){ std::istringstream is(line); int t1; vector<int> tmp; is >> t1; tmp.push_back(t1-1); is >> t1; tmp.push_back(t1-1); is >> t1; tmp.push_back(t1); test.push_back(tmp); } cout << "test data extraction complete, total: " << test.size() << " test instances"<< endl; in.close(); } float standard_deviation(float data[], int n, float &m){ float mean=0.0, sum_deviation=0.0; int i; for(i=0; i<n;++i) { mean+=data[i]; } mean=mean/n; for(i=0; i<n;++i) sum_deviation+=(data[i]-mean)*(data[i]-mean); m = mean; return sqrt(sum_deviation/n); } __global__ void print(float *y){ int warp_index = threadIdx.x % 32; int warp_id = threadIdx.x / 32; int index = blockIdx.x*blockDim.x/32 + warp_id; if (index == 933) printf("%f \n",y[index*32+warp_index]); }
9dbe2eb3164092eaca79bb79f89c380f7c9c630c.cu
#include "grouping.h" #define htod cudaMemcpyHostToDevice #define dtoh cudaMemcpyDeviceToHost void array_ordering(int instances, vector<int> &row,vector<int> &col,vector<float> &w,int *h_row,int* h_col, float *h_w){ for(int i = 0; i < instances; i++){ h_row[i] = row[i]; h_col[i] = col[i]; h_w[i] = w[i]; } } void grouping(vector<vector<int> > &row_g, vector<vector<int> > &col_g,vector<vector<float> > &w_g,int m, int n, int instances, vector<int> &row,vector<int> &col,vector<float> &w){ for(int i = 0; i < instances; i++){ int _row = row[i]; int _col = col[i]; float _w = w[i]; /* the weights are floats; an int here would silently truncate them */ int index = _row - _col; if(index < 0){ row_g[max(m,n)+index].push_back(_row); col_g[max(m,n)+index].push_back(_col); w_g[max(m,n)+index].push_back(_w); } else{ row_g[index].push_back(_row); col_g[index].push_back(_col); w_g[index].push_back(_w); } } int k = 0; for(int i = 0; i < row_g.size(); i++){ for(int j = 0; j < row_g[i].size(); j++){ row[k] = row_g[i][j]; col[k] = col_g[i][j]; w[k++] = w_g[i][j]; } } } void read(vector<int> &row,vector<int> &col,vector<float> &w,int &m, int &n, int &instances, vector<vector<int> > &test){ ifstream in; in.open("train.txt",ios_base::in); if(!in.is_open()){ cout << "error" << endl; exit(1); } //get instances, m, n string tmp; getline(in,tmp); std::istringstream is_tmp(tmp); is_tmp >> m; is_tmp >> n; is_tmp >> instances; cout << instances << endl; int count = 0; for(string line; getline(in,line); ){ std::istringstream is(line); int t1; float t2; is >> t1; row.push_back(t1-1); is >> t1; col.push_back(t1-1); is >> t2; w.push_back(t2); count++; } in.close(); in.open("test.txt",ios_base::in); if(!in.is_open()){ cout << "error" << endl; exit(1); } for(string line; getline(in,line); ){ std::istringstream is(line); int t1; vector<int> tmp; is >> t1; tmp.push_back(t1-1); is >> t1; tmp.push_back(t1-1); is >> t1; tmp.push_back(t1); test.push_back(tmp); } cout << "test data extraction complete, total: " << test.size() << " test instances"<< endl; in.close(); } float standard_deviation(float data[], int n, float &m){ float mean=0.0, sum_deviation=0.0; int i; for(i=0; i<n;++i) { mean+=data[i]; } mean=mean/n; for(i=0; i<n;++i) sum_deviation+=(data[i]-mean)*(data[i]-mean); m = mean; return sqrt(sum_deviation/n); } __global__ void print(float *y){ int warp_index = threadIdx.x % 32; int warp_id = threadIdx.x / 32; int index = blockIdx.x*blockDim.x/32 + warp_id; if (index == 933) printf("%f \n",y[index*32+warp_index]); }
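The grouping above buckets each entry by its diagonal index (row − col, wrapped by max(m, n)). Two entries in the same bucket can never share a row or a column, which is what makes per-bucket parallel updates conflict-free. A tiny standalone illustration of the same wrap, with a hypothetical 3×3 example that is not part of the file:

#include <cstdio>
int main() {
    const int m = 3, n = 3;
    for (int row = 0; row < m; row++)
        for (int col = 0; col < n; col++) {
            int index = row - col;
            if (index < 0) index += (m > n ? m : n);  // same wrap as grouping()
            printf("(%d,%d) -> bucket %d\n", row, col, index);
        }
    return 0;
}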
e4da4b087fdd3ea8e5fb5eef5e7ba758decdccd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "XLib.hpp" using namespace timer_cuda; const int SIZE = (1 << 27); const int BLOCKDIM = 256; int main() { int* devInput; hipMalloc(&devInput, SIZE * sizeof(int)); Timer<DEVICE> TM; TM.start(); hipLaunchKernelGGL(( cuda_util::fill), dim3(SIZE / BLOCKDIM), dim3(BLOCKDIM), 0, 0, devInput, 1024, 131072, 1, 1024); TM.getTime("fill1"); __CUDA_ERROR("A") TM.start(); hipLaunchKernelGGL(( cuda_util::fill2), dim3(SIZE / BLOCKDIM), dim3(BLOCKDIM), 0, 0, devInput, 1024, 131072, 1, 1024); TM.getTime("fill2"); __CUDA_ERROR("B") }
e4da4b087fdd3ea8e5fb5eef5e7ba758decdccd9.cu
#include "XLib.hpp" using namespace timer_cuda; const int SIZE = (1 << 27); const int BLOCKDIM = 256; int main() { int* devInput; cudaMalloc(&devInput, SIZE * sizeof(int)); Timer<DEVICE> TM; TM.start(); cuda_util::fill<<<SIZE / BLOCKDIM, BLOCKDIM>>>(devInput, 1024, 131072, 1, 1024); TM.getTime("fill1"); __CUDA_ERROR("A") TM.start(); cuda_util::fill2<<<SIZE / BLOCKDIM, BLOCKDIM>>>(devInput, 1024, 131072, 1, 1024); TM.getTime("fill2"); __CUDA_ERROR("B") }
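Timer<DEVICE> comes from XLib, so its exact behavior is not visible here; an equivalent measurement with nothing but the CUDA runtime would use events, as in this sketch (the kernel call mirrors the one in the file above):

cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
cuda_util::fill<<<SIZE / BLOCKDIM, BLOCKDIM>>>(devInput, 1024, 131072, 1, 1024);
cudaEventRecord(t1);
cudaEventSynchronize(t1);            // wait for the kernel to finish
float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1);   // elapsed GPU time in milliseconds
printf("fill1: %.3f ms\n", ms);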
f2e6b79a30af7d43976f7bc7068c7a5c986f7724.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × π. extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of log_e(1 + x). extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × π. extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable double-precision floating-point value after argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute double-precision floating-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_log1p (int n, double *result, double *x) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = log1p(x[id]); } }
f2e6b79a30af7d43976f7bc7068c7a5c986f7724.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × π. extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of log_e(1 + x). extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × π. extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable double-precision floating-point value after argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute double-precision floating-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_log1p (int n, double *result, double *x) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = log1p(x[id]); } }
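vec_log1p flattens a 2-D launch into a linear index, so a plain 1-D configuration (where the y components stay 0) is the simplest way to drive it; a minimal launch sketch with illustrative sizes:

int n = 1 << 20;
double *d_x, *d_result;
cudaMalloc(&d_x, n * sizeof(double));
cudaMalloc(&d_result, n * sizeof(double));
int block = 256;
int grid = (n + block - 1) / block;   // ceil(n / block)
vec_log1p<<<grid, block>>>(n, d_result, d_x);
cudaDeviceSynchronize();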
de5de1ca45af63f961a3c672868972a18af59984.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cudaDSaturation_propagate_kernel(double* x, double* y, unsigned int size, int shifting, double threshold) { const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = index; i < size; i += stride) { double value = x[i]; if (shifting > 0) value /= (1 << shifting); else if (shifting < 0) value *= (1 << (-shifting)); if (threshold != 0.0) { y[i] = (value < -threshold) ? -threshold : (value > threshold) ? threshold : value; } else { // a threshold of 0.0 disables clamping; the shifted value still has to reach y y[i] = value; } } }
de5de1ca45af63f961a3c672868972a18af59984.cu
#include "includes.h" __global__ void cudaDSaturation_propagate_kernel(double* x, double* y, unsigned int size, int shifting, double threshold) { const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = index; i < size; i += stride) { double value = x[i]; if (shifting > 0) value /= (1 << shifting); else if (shifting < 0) value *= (1 << (-shifting)); if (threshold != 0.0) { y[i] = (value < -threshold) ? -threshold : (value > threshold) ? threshold : value; } else { // a threshold of 0.0 disables clamping; the shifted value still has to reach y y[i] = value; } } }
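Because the kernel iterates with a grid-stride loop, any launch geometry covers the whole buffer; a minimal host-side sketch (sizes and parameter values are illustrative only):

unsigned int size = 1u << 22;
double *d_x, *d_y;
cudaMalloc(&d_x, size * sizeof(double));
cudaMalloc(&d_y, size * sizeof(double));
// shifting = 2 divides each value by 4; threshold = 1.0 clamps to [-1, 1]
cudaDSaturation_propagate_kernel<<<256, 256>>>(d_x, d_y, size, 2, 1.0);
cudaDeviceSynchronize();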
c5f121c413303b72b23779054efa3197d36964b4.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <ATen/ATen.h> using namespace at; #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <iostream> // Cuda tensor accessor definitions // restrict pointer traits piroritize speed over memory consumption #define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t> #define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t> #define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W) #define THREADS_FORWARD 32 #define THREADS_BACKWARD 5 namespace { template <typename scalar_t> __global__ void correlation_cuda_forward_kernel( const TensorAcc4R rInput1, const TensorAcc4R rInput2, TensorAcc5R output, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int iH = rInput1.size(1); const int iW = rInput1.size(2); const int C = rInput1.size(3); const int n = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int thread = threadIdx.x; const int start_i = -padH + h * dH; const int start_j = -padW + w * dW; const int patchRadH = dilation_patchH * (patchH - 1) / 2; const int patchRadW = dilation_patchW * (patchW - 1) / 2; __shared__ scalar_t prod_sum[THREADS_FORWARD]; for(int ph = 0; ph < patchH; ++ph){ int ph_dilated = ph * dilation_patchH - patchRadH; for(int pw = 0; pw < patchW; ++pw){ int pw_dilated = pw * dilation_patchW - patchRadW; prod_sum[thread] = 0; for (int i=0; i<kH; ++i){ int i1 = start_i + i; int i2 = i1 + ph_dilated; if WITHIN_BOUNDS(i1, i2, iH, iH){ for (int j=0; j<kW; ++j){ int j1 = start_j + j; int j2 = j1 + pw_dilated; if WITHIN_BOUNDS(j1, j2, iW, iW){ for (int c=thread; c<C; c += THREADS_FORWARD){ scalar_t v1 = rInput1[n][i1][j1][c]; scalar_t v2 = rInput2[n][i2][j2][c]; prod_sum[thread] += v1 * v2; } } } } } // accumulate __syncthreads(); if (thread == 0) { scalar_t reduce_sum = 0; for (int index = 0; index < THREADS_FORWARD; ++index) { reduce_sum += prod_sum[index]; } output[n][ph][pw][h][w] = reduce_sum; } } } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input1( const TensorAcc5R gradOutput, const TensorAcc4R input2, TensorAcc4R gradInput1, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input2.size(2); const int iW = input2.size(3); const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; const int h_2 = h + padH; const int w_2 = w + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; /*we perform a module but since we have the quotient, we can cheat a bit*/ const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h + dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w + dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input2[n][c][i1][j1]; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; for(int tmp2 = w_off, j = 
0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = start_j2 - j; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput1[n][c][h][w] = reduce_sum; } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input2( const TensorAcc5R gradOutput, const TensorAcc4R input1, TensorAcc4R gradInput2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input1.size(2); const int iW = input1.size(3); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h - dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w - dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input1[n][c][i1][j1]; const int h_2 = i1 + padH; const int w_2 = j1 + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = start_j2 - j; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput2[n][c][h][w] = reduce_sum; } } } //cpp torch::Tensor correlation_cuda_forward( torch::Tensor input1, torch::Tensor input2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const auto oH = (iH + 2 * padH - kH) / dH + 1; const auto oW = (iW + 2 * padW - kW) / dW + 1; auto output = torch::zeros({batch_size, patchH, patchW, oH, oW}, input1.options()); auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous(); auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous(); const int threads = THREADS_FORWARD; const dim3 blocks(batch_size, oH, oW); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_forward_cuda", ([&] { TensorAcc4R trInput1_acc = trInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R trInput2_acc = trInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R output_acc = output.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); hipLaunchKernelGGL(( correlation_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, trInput1_acc, trInput2_acc, output_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW); })); return output; } std::vector<torch::Tensor> correlation_cuda_backward( torch::Tensor input1, torch::Tensor 
input2, torch::Tensor gradOutput, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { auto gradInput1 = torch::zeros_like(input1); auto gradInput2 = torch::zeros_like(input2); const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const int C = input1.size(1); const dim3 blocks(C, iH, iW); const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_backward_cuda", ([&] { TensorAcc4R input1_acc = input1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R input2_acc = input2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput1_acc = gradInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput2_acc = gradInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R gradOutput_acc = gradOutput.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); for (int n = 0; n < batch_size; ++n){ hipLaunchKernelGGL(( correlation_cuda_backward_kernel_input1<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gradOutput_acc, input2_acc, gradInput1_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } for (int n = 0; n < batch_size; ++n){ hipLaunchKernelGGL(( correlation_cuda_backward_kernel_input2<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gradOutput_acc, input1_acc, gradInput2_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } })); return {gradInput1, gradInput2}; }
c5f121c413303b72b23779054efa3197d36964b4.cu
#include <torch/extension.h> #include <ATen/ATen.h> using namespace at; #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <iostream> // Cuda tensor accessor definitions // restrict pointer traits piroritize speed over memory consumption #define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t> #define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t> #define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W) #define THREADS_FORWARD 32 #define THREADS_BACKWARD 5 namespace { template <typename scalar_t> __global__ void correlation_cuda_forward_kernel( const TensorAcc4R rInput1, const TensorAcc4R rInput2, TensorAcc5R output, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int iH = rInput1.size(1); const int iW = rInput1.size(2); const int C = rInput1.size(3); const int n = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int thread = threadIdx.x; const int start_i = -padH + h * dH; const int start_j = -padW + w * dW; const int patchRadH = dilation_patchH * (patchH - 1) / 2; const int patchRadW = dilation_patchW * (patchW - 1) / 2; __shared__ scalar_t prod_sum[THREADS_FORWARD]; for(int ph = 0; ph < patchH; ++ph){ int ph_dilated = ph * dilation_patchH - patchRadH; for(int pw = 0; pw < patchW; ++pw){ int pw_dilated = pw * dilation_patchW - patchRadW; prod_sum[thread] = 0; for (int i=0; i<kH; ++i){ int i1 = start_i + i; int i2 = i1 + ph_dilated; if WITHIN_BOUNDS(i1, i2, iH, iH){ for (int j=0; j<kW; ++j){ int j1 = start_j + j; int j2 = j1 + pw_dilated; if WITHIN_BOUNDS(j1, j2, iW, iW){ for (int c=thread; c<C; c += THREADS_FORWARD){ scalar_t v1 = rInput1[n][i1][j1][c]; scalar_t v2 = rInput2[n][i2][j2][c]; prod_sum[thread] += v1 * v2; } } } } } // accumulate __syncthreads(); if (thread == 0) { scalar_t reduce_sum = 0; for (int index = 0; index < THREADS_FORWARD; ++index) { reduce_sum += prod_sum[index]; } output[n][ph][pw][h][w] = reduce_sum; } } } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input1( const TensorAcc5R gradOutput, const TensorAcc4R input2, TensorAcc4R gradInput1, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input2.size(2); const int iW = input2.size(3); const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; const int h_2 = h + padH; const int w_2 = w + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; /*we perform a module but since we have the quotient, we can cheat a bit*/ const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h + dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w + dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input2[n][c][i1][j1]; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = start_j2 - j; if 
WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput1[n][c][h][w] = reduce_sum; } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input2( const TensorAcc5R gradOutput, const TensorAcc4R input1, TensorAcc4R gradInput2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input1.size(2); const int iW = input1.size(3); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h - dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w - dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input1[n][c][i1][j1]; const int h_2 = i1 + padH; const int w_2 = j1 + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = start_j2 - j; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput2[n][c][h][w] = reduce_sum; } } } //cpp torch::Tensor correlation_cuda_forward( torch::Tensor input1, torch::Tensor input2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const auto oH = (iH + 2 * padH - kH) / dH + 1; const auto oW = (iW + 2 * padW - kW) / dW + 1; auto output = torch::zeros({batch_size, patchH, patchW, oH, oW}, input1.options()); auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous(); auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous(); const int threads = THREADS_FORWARD; const dim3 blocks(batch_size, oH, oW); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_forward_cuda", ([&] { TensorAcc4R trInput1_acc = trInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R trInput2_acc = trInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R output_acc = output.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); correlation_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( trInput1_acc, trInput2_acc, output_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW); })); return output; } std::vector<torch::Tensor> correlation_cuda_backward( torch::Tensor input1, torch::Tensor input2, torch::Tensor gradOutput, int kH, int kW, int patchH, int patchW, int padH, int padW, int 
dilation_patchH, int dilation_patchW, int dH, int dW) { auto gradInput1 = torch::zeros_like(input1); auto gradInput2 = torch::zeros_like(input2); const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const int C = input1.size(1); const dim3 blocks(C, iH, iW); const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_backward_cuda", ([&] { TensorAcc4R input1_acc = input1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R input2_acc = input2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput1_acc = gradInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput2_acc = gradInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R gradOutput_acc = gradOutput.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); for (int n = 0; n < batch_size; ++n){ correlation_cuda_backward_kernel_input1<scalar_t><<<blocks, threads>>>( gradOutput_acc, input2_acc, gradInput1_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } for (int n = 0; n < batch_size; ++n){ correlation_cuda_backward_kernel_input2<scalar_t><<<blocks, threads>>>( gradOutput_acc, input1_acc, gradInput2_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } })); return {gradInput1, gradInput2}; }
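The forward wrapper derives the spatial output size as oH = (iH + 2·padH − kH)/dH + 1 (and likewise for oW), so a 32×32 input with a 3×3 kernel, padding 1 and stride 1 keeps its resolution. A call sketch with illustrative sizes (the tensor shapes here are examples, not values from the file):

auto a = torch::randn({2, 16, 32, 32}, torch::device(torch::kCUDA));
auto b = torch::randn({2, 16, 32, 32}, torch::device(torch::kCUDA));
// kH = kW = 3, patchH = patchW = 5, padH = padW = 1, dilation_patch = 1, dH = dW = 1
auto out = correlation_cuda_forward(a, b, 3, 3, 5, 5, 1, 1, 1, 1, 1, 1);
// out.sizes() == {2, 5, 5, 32, 32}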
3e452c9f22e157dc6b257185f5f7b4012df128b5.hip
// !!! This is a file automatically generated by hipify!!! //@copyright All rights are reserved, this code/project is not Open Source or Free //@bug None Documented //@author Nathaniel Crossman (U00828694) //@email [email protected] // //@Professor Meilin Liu //@Course_Number CS 4370/6370-90 //@date Thursday, September 26, 2019 // //@project_name: // Task 1: Basic Matrix Addition // Task 2: Basic Matrix Multiplication // System includes #include <stdio.h> #include <math.h> #include <string.h> #include <assert.h> // CUDA runtime #include "hip/hip_runtime.h" #include <hip/hip_runtime.h> #include "device_launch_parameters.h" //#include <helper_functions.h> //#include <helper_cuda.h> /** * Matrix Addtions (CUDA Kernel) on the device: C = A + B * Matrix are all of the same size dim (2*2, 16*16) */ __global__ void add_matrix_gpu(int *devise_matrix_A, int *devise_matrix_B, int *devise_matrix_C, int width) { int row = blockIdx.x*blockDim.x + threadIdx.x; int col = blockIdx.y*blockDim.y + threadIdx.y; int index = row * width + col; int p_value = 0; if ((row < width) && (col < width)) { p_value = devise_matrix_A[index] + devise_matrix_B[index]; devise_matrix_C[index] = p_value; } } __global__ void matrixMulKernel(int* devise_matrix_A, int* devise_matrix_B, int * devise_matrix_C, int width) { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int i = 0, j = 0,index = 0; if ((row < width) && (col < width)){ int p_value = 0; for (int k = 0; k < width; ++k) { i = row * width + k; j = k * width + col; p_value += devise_matrix_A[i] * devise_matrix_B[j]; } index = row * width + col; devise_matrix_C[index] = p_value; } } // Kernel functions above //-------------------------------------------------------------------------- void freeMemory_h(int* h_matrix_A, int* h_matrix_B, int*h_matrix_C, int *h_matrix_final_gpu); void freeMemory_d(int* d_matrix_A, int* d_matrix_B, int*d_matrix_C); void add_matrix_cpu(int *matrix_A, int *matrix_B, int *matrix_C, int width); int verify(int *matrix_A, int *matrix_B, int width); void addMatrixMain(); //Helper f int menuShow(); void mainSwitch(int option); void cls(); int debugOn(); void matrixMulOnHost(int* M, int* N, int* P, int width); void multiplicationMatrixMain(); void initializationM(int *h_matrix_A, int *h_matrix_B, int width); //helper for both parts void getBlockSize(int &blockSize); void getWidth(int &width); void initialization(int *h_matrix_A, int *h_matrix_B, int width); void printf_matrix(int *matrix_A, int width); //Above is all prototypes int main() { // This will pick the best possible CUDA capable device, otherwise // override the device ID based on input provided at the command line //int dev = findCudaDevice(argc, (const char **)argv); while (true) { mainSwitch(menuShow()); printf("\n"); } return 0; } int menuShow() { int hold; do { printf("1. Add Matrix \n"); printf("2. Multiply Matrix \n"); printf("3. Quit\n"); printf("---------------------------------------\n"); printf("Enter Choice: "); scanf("%d", &hold); if (hold < 1 || hold > 3) { cls(); } } while (hold < 1 || hold > 3); return hold; } void cls() { for (int i = 0; i < 30; i++) printf("\n"); system("@cls||clear"); } /* This function is like the driver function. It hold the switch statement that called the function. 
*/ void mainSwitch(int option) { switch (option) { case 1: addMatrixMain(); break; case 2: multiplicationMatrixMain(); break; case 3: exit(0); break; } } void getWidth(int &width) { printf("Please specify your square matrix dimension\n"); printf("For example, you could enter 64 and the matrix dimension 64*64\n"); printf("Enter Square Matrix size:"); scanf("%d", &width); cls(); } void getBlockSize(int &blockSize) { printf("Please specify your Block size \n"); printf("For example, you could enter 4 and the block size would be 4 * 4 \n"); printf("Enter Block Size:"); scanf("%d", &blockSize); cls(); } void initialization(int *h_matrix_A, int *h_matrix_B, int width) { int i = 0, j = 0, index = 0; int init = 1325; for (i = 0; i<width; ++i) { for (j = 0; j<width; ++j) { index = i * width + j; init = 3125 * init % 65536; h_matrix_A[index] = (init - 32768) / 16384; h_matrix_B[index] = init % 1000; } } } void initializationM(int *h_matrix_A, int *h_matrix_B, int width) { int i = 0, j = 0, index = 0; int init = 1325; for (i = 0; i<width; ++i) { for (j = 0; j<width; ++j) { index = i * width + j; init = 3125 * init % 65536; h_matrix_A[index] = (init - 32768) / 6553; h_matrix_B[index] = init % 1000; } } } /* We are working on the host so we can use a 2D array like normal */ void add_matrix_cpu(int *matrix_A, int *matrix_B, int *matrix_C, int width) { int i, j, index; //pre-increment operator (++i) merely increments and returns. faster then i++ for (i = 0; i < width; ++i) { //row = y for (j = 0; j < width; ++j) { //col = x index = i * width + j; //index = i + j * width; matrix_C[index] = matrix_A[index] + matrix_B[index]; } } } void printf_matrix(int *matrix_A, int width) { int i, j, index; for (i = 0; i < width; ++i) { for (j = 0; j < width; ++j) { index = i * width + j; printf("%d \t", matrix_A[index]); } printf("\n"); } printf("\n"); } int verify(int *matrix_A, int *matrix_B, int width) { int index = 0; for (int i = 0; i < width; i++){ for (int j = 0; j < width; j++){ //index = i + j * width; index = i * width + j; if (matrix_A[index] != matrix_B[index]){ printf("Test failed\n"); return 0; } } } printf("The Test Passed\n"); return 1; } void matrixMulOnHost(int* M, int* N, int* P, int width){ for (int i = 0; i<width; i++) { for (int j = 0; j < width; ++j) { int sum = 0; for (int k = 0; k < width; ++k) { int a = M[i * width + k]; int b = N[k * width + j]; sum += a * b; } P[i * width + j] = sum; } } } int debugOn() { int hold; do { printf("\nRun in debug mode?\n"); printf("Debug mode prints out alot of helpful info,\nbut it can takes a long time with big matrixes\n"); printf("Enter 1 for Yes and 0 for No:"); scanf("%d", &hold); if (hold < 0 || hold > 1) { cls(); } } while (hold < 0 || hold > 1); cls(); return hold; } void addMatrixMain() { int width = 0, blockSize = 0; int *h_matrix_A, *h_matrix_B, *h_matrix_C; int *d_matrix_A, *d_matrix_B, *d_matrix_C; int * h_matrix_final_gpu; int booleanValue = debugOn(); getWidth(width); getBlockSize(blockSize); printf("Matrix Size: %d * %d \nSize of Thread block: %d * %d", width, width, blockSize, blockSize); printf("\n\n"); printf("Adding...\n"); //The size of all matrixes size_t dsize = (width * width) * sizeof(int); //Allocate memory for matrices on host h_matrix_A = (int*)malloc(dsize); h_matrix_B = (int*)malloc(dsize); h_matrix_C = (int*)malloc(dsize); h_matrix_final_gpu = (int*)malloc(dsize); //Set all matrices to 0 , Not needed but used for testing memset(h_matrix_A, 0, dsize); memset(h_matrix_B, 0, dsize); memset(h_matrix_C, 0, dsize); //Allocate memory for 
device Matrix hipMalloc((void **)(&d_matrix_A), dsize); hipMalloc((void **)(&d_matrix_B), dsize); hipMalloc((void **)(&d_matrix_C), dsize); //checkCudaErrors(hipMalloc((void **)(&d_matrix_A), dsize)); //checkCudaErrors(hipMalloc((void **)(&d_matrix_B), dsize)); //checkCudaErrors(hipMalloc((void **)(&d_matrix_C), dsize)); initialization(h_matrix_A, h_matrix_B, width); add_matrix_cpu(h_matrix_A, h_matrix_B, h_matrix_C, width); if (booleanValue) { printf_matrix(h_matrix_A, width); printf_matrix(h_matrix_B, width); printf("\nThe results of CPU addition\n"); printf_matrix(h_matrix_C, width); } //copy the Matrices from Host to Device hipMemcpy(d_matrix_A, h_matrix_A, dsize, hipMemcpyHostToDevice); hipMemcpy(d_matrix_B, h_matrix_B, dsize, hipMemcpyHostToDevice); /*checkCudaErrors(hipMemcpy(d_matrix_A, h_matrix_A, dsize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_matrix_B, h_matrix_B, dsize, hipMemcpyHostToDevice));*/ dim3 dimBlock(blockSize, blockSize); dim3 dimGrid(ceil(((double)width) / dimBlock.x), ceil(((double)width) / dimBlock.y)); //GPU add_matrix_gpu << < dimGrid, dimBlock >> >(d_matrix_A, d_matrix_B, d_matrix_C, width); // Copy result from device to host hipMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, hipMemcpyDeviceToHost); //checkCudaErrors(hipMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, hipMemcpyHostToDevice)); printf("GPU done Addition\n"); if (booleanValue) { printf("\nThe results of GPU Addition\n"); printf_matrix(h_matrix_final_gpu, width); } printf("\nVerifying\n"); verify(h_matrix_C, h_matrix_final_gpu, width); // Clean up memory freeMemory_h(h_matrix_A, h_matrix_B, h_matrix_C, h_matrix_final_gpu); freeMemory_d(d_matrix_A, d_matrix_B, d_matrix_C); } void multiplicationMatrixMain() { int width = 0, blockSize = 0; int *h_matrix_A, *h_matrix_B, *h_matrix_C; int *d_matrix_A, *d_matrix_B, *d_matrix_C; int * h_matrix_final_gpu; int booleanValue = debugOn(); getWidth(width); getBlockSize(blockSize); printf("Matrix Size: %d * %d \nSize of Thread block: %d * %d", width, width, blockSize, blockSize); printf("\n\n"); printf("multiplying....\n"); //The size of all matrixes size_t dsize = (width * width) * sizeof(int); //Allocate memory for matrices on host h_matrix_A = (int*)malloc(dsize); h_matrix_B = (int*)malloc(dsize); h_matrix_C = (int*)malloc(dsize); h_matrix_final_gpu = (int*)malloc(dsize); //Set all matrices to 0 , Not needed but used for testing memset(h_matrix_A, 0, dsize); memset(h_matrix_B, 0, dsize); memset(h_matrix_C, 0, dsize); //Allocate memory for device Matrix hipMalloc((void **)(&d_matrix_A), dsize); hipMalloc((void **)(&d_matrix_B), dsize); hipMalloc((void **)(&d_matrix_C), dsize); /*checkCudaErrors(hipMalloc((void **)(&d_matrix_A), dsize)); checkCudaErrors(hipMalloc((void **)(&d_matrix_B), dsize)); checkCudaErrors(hipMalloc((void **)(&d_matrix_C), dsize));*/ initializationM(h_matrix_A, h_matrix_B, width); matrixMulOnHost(h_matrix_A, h_matrix_B, h_matrix_C, width); if (booleanValue) { printf_matrix(h_matrix_A, width); printf_matrix(h_matrix_B, width); printf("\nThe results of CPU Multiplication\n"); printf_matrix(h_matrix_C, width); } //copy the Matrices from Host to Device hipMemcpy(d_matrix_A, h_matrix_A, dsize, hipMemcpyHostToDevice); hipMemcpy(d_matrix_B, h_matrix_B, dsize, hipMemcpyHostToDevice); /*checkCudaErrors(hipMemcpy(d_matrix_A, h_matrix_A, dsize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_matrix_B, h_matrix_B, dsize, hipMemcpyHostToDevice));*/ dim3 dimBlock(blockSize, blockSize); dim3 dimGrid(ceil(((double)width) / dimBlock.x), 
ceil(((double)width) / dimBlock.y)); //GPU matrixMulKernel << < dimGrid, dimBlock >> >(d_matrix_A, d_matrix_B, d_matrix_C, width); // Copy result from device to host hipMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, hipMemcpyDeviceToHost); //checkCudaErrors(hipMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, hipMemcpyDeviceToHost)); printf("GPU done Multiplying Matrices\n"); if (booleanValue) { printf("\nThe results of GPU Multiplication\n"); printf_matrix(h_matrix_final_gpu, width); } printf("\nVerifying\n"); verify(h_matrix_C, h_matrix_final_gpu, width); // Clean up memory freeMemory_h(h_matrix_A, h_matrix_B, h_matrix_C, h_matrix_final_gpu); freeMemory_d(d_matrix_A, d_matrix_B, d_matrix_C); } void freeMemory_h(int* h_matrix_A,int* h_matrix_B, int*h_matrix_C, int *h_matrix_final_gpu) { // Clean up memory free(h_matrix_A); free(h_matrix_B); free(h_matrix_C); free(h_matrix_final_gpu); } void freeMemory_d(int* d_matrix_A, int* d_matrix_B, int*d_matrix_C) { // Clean up memory hipFree(d_matrix_A); hipFree(d_matrix_B); hipFree(d_matrix_C); //checkCudaErrors(hipFree(d_matrix_A)); //checkCudaErrors(hipFree(d_matrix_B)); //checkCudaErrors(hipFree(d_matrix_C)); }
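Both launch configurations in this program compute the grid size as ceil(((double)width) / dimBlock.x); a pure integer round-up is equivalent and avoids the int-to-double-to-int round trip. A minimal sketch (divUp is an illustrative helper, not part of the file):

// Integer round-up division: smallest g such that g * d >= n
static inline unsigned int divUp(unsigned int n, unsigned int d) {
    return (n + d - 1) / d;
}
// e.g. dim3 dimGrid(divUp(width, blockSize), divUp(width, blockSize));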
3e452c9f22e157dc6b257185f5f7b4012df128b5.cu
//@copyright All rights are reserved, this code/project is not Open Source or Free //@bug None Documented //@author Nathaniel Crossman (U00828694) //@email [email protected] // //@Professor Meilin Liu //@Course_Number CS 4370/6370-90 //@date Thursday, September 26, 2019 // //@project_name: // Task 1: Basic Matrix Addition // Task 2: Basic Matrix Multiplication // System includes #include <stdio.h> #include <math.h> #include <string.h> #include <assert.h> // CUDA runtime #include "cuda_runtime.h" #include <cuda.h> #include "device_launch_parameters.h" //#include <helper_functions.h> //#include <helper_cuda.h> /** * Matrix Addtions (CUDA Kernel) on the device: C = A + B * Matrix are all of the same size dim (2*2, 16*16) */ __global__ void add_matrix_gpu(int *devise_matrix_A, int *devise_matrix_B, int *devise_matrix_C, int width) { int row = blockIdx.x*blockDim.x + threadIdx.x; int col = blockIdx.y*blockDim.y + threadIdx.y; int index = row * width + col; int p_value = 0; if ((row < width) && (col < width)) { p_value = devise_matrix_A[index] + devise_matrix_B[index]; devise_matrix_C[index] = p_value; } } __global__ void matrixMulKernel(int* devise_matrix_A, int* devise_matrix_B, int * devise_matrix_C, int width) { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int i = 0, j = 0,index = 0; if ((row < width) && (col < width)){ int p_value = 0; for (int k = 0; k < width; ++k) { i = row * width + k; j = k * width + col; p_value += devise_matrix_A[i] * devise_matrix_B[j]; } index = row * width + col; devise_matrix_C[index] = p_value; } } // Kernel functions above //-------------------------------------------------------------------------- void freeMemory_h(int* h_matrix_A, int* h_matrix_B, int*h_matrix_C, int *h_matrix_final_gpu); void freeMemory_d(int* d_matrix_A, int* d_matrix_B, int*d_matrix_C); void add_matrix_cpu(int *matrix_A, int *matrix_B, int *matrix_C, int width); int verify(int *matrix_A, int *matrix_B, int width); void addMatrixMain(); //Helper f int menuShow(); void mainSwitch(int option); void cls(); int debugOn(); void matrixMulOnHost(int* M, int* N, int* P, int width); void multiplicationMatrixMain(); void initializationM(int *h_matrix_A, int *h_matrix_B, int width); //helper for both parts void getBlockSize(int &blockSize); void getWidth(int &width); void initialization(int *h_matrix_A, int *h_matrix_B, int width); void printf_matrix(int *matrix_A, int width); //Above is all prototypes int main() { // This will pick the best possible CUDA capable device, otherwise // override the device ID based on input provided at the command line //int dev = findCudaDevice(argc, (const char **)argv); while (true) { mainSwitch(menuShow()); printf("\n"); } return 0; } int menuShow() { int hold; do { printf("1. Add Matrix \n"); printf("2. Multiply Matrix \n"); printf("3. Quit\n"); printf("---------------------------------------\n"); printf("Enter Choice: "); scanf("%d", &hold); if (hold < 1 || hold > 3) { cls(); } } while (hold < 1 || hold > 3); return hold; } void cls() { for (int i = 0; i < 30; i++) printf("\n"); system("@cls||clear"); } /* This function is like the driver function. It hold the switch statement that called the function. 
*/ void mainSwitch(int option) { switch (option) { case 1: addMatrixMain(); break; case 2: multiplicationMatrixMain(); break; case 3: exit(0); break; } } void getWidth(int &width) { printf("Please specify your square matrix dimension\n"); printf("For example, you could enter 64 and the matrix dimension 64*64\n"); printf("Enter Square Matrix size:"); scanf("%d", &width); cls(); } void getBlockSize(int &blockSize) { printf("Please specify your Block size \n"); printf("For example, you could enter 4 and the block size would be 4 * 4 \n"); printf("Enter Block Size:"); scanf("%d", &blockSize); cls(); } void initialization(int *h_matrix_A, int *h_matrix_B, int width) { int i = 0, j = 0, index = 0; int init = 1325; for (i = 0; i<width; ++i) { for (j = 0; j<width; ++j) { index = i * width + j; init = 3125 * init % 65536; h_matrix_A[index] = (init - 32768) / 16384; h_matrix_B[index] = init % 1000; } } } void initializationM(int *h_matrix_A, int *h_matrix_B, int width) { int i = 0, j = 0, index = 0; int init = 1325; for (i = 0; i<width; ++i) { for (j = 0; j<width; ++j) { index = i * width + j; init = 3125 * init % 65536; h_matrix_A[index] = (init - 32768) / 6553; h_matrix_B[index] = init % 1000; } } } /* We are working on the host so we can use a 2D array like normal */ void add_matrix_cpu(int *matrix_A, int *matrix_B, int *matrix_C, int width) { int i, j, index; //pre-increment operator (++i) merely increments and returns. faster than i++ for (i = 0; i < width; ++i) { //row = y for (j = 0; j < width; ++j) { //col = x index = i * width + j; //index = i + j * width; matrix_C[index] = matrix_A[index] + matrix_B[index]; } } } void printf_matrix(int *matrix_A, int width) { int i, j, index; for (i = 0; i < width; ++i) { for (j = 0; j < width; ++j) { index = i * width + j; printf("%d \t", matrix_A[index]); } printf("\n"); } printf("\n"); } int verify(int *matrix_A, int *matrix_B, int width) { int index = 0; for (int i = 0; i < width; i++){ for (int j = 0; j < width; j++){ //index = i + j * width; index = i * width + j; if (matrix_A[index] != matrix_B[index]){ printf("Test failed\n"); return 0; } } } printf("The Test Passed\n"); return 1; } void matrixMulOnHost(int* M, int* N, int* P, int width){ for (int i = 0; i<width; i++) { for (int j = 0; j < width; ++j) { int sum = 0; for (int k = 0; k < width; ++k) { int a = M[i * width + k]; int b = N[k * width + j]; sum += a * b; } P[i * width + j] = sum; } } } int debugOn() { int hold; do { printf("\nRun in debug mode?\n"); printf("Debug mode prints out a lot of helpful info,\nbut it can take a long time with big matrices\n"); printf("Enter 1 for Yes and 0 for No:"); scanf("%d", &hold); if (hold < 0 || hold > 1) { cls(); } } while (hold < 0 || hold > 1); cls(); return hold; } void addMatrixMain() { int width = 0, blockSize = 0; int *h_matrix_A, *h_matrix_B, *h_matrix_C; int *d_matrix_A, *d_matrix_B, *d_matrix_C; int * h_matrix_final_gpu; int booleanValue = debugOn(); getWidth(width); getBlockSize(blockSize); printf("Matrix Size: %d * %d \nSize of Thread block: %d * %d", width, width, blockSize, blockSize); printf("\n\n"); printf("Adding...\n"); //The size of all matrices size_t dsize = (width * width) * sizeof(int); //Allocate memory for matrices on host h_matrix_A = (int*)malloc(dsize); h_matrix_B = (int*)malloc(dsize); h_matrix_C = (int*)malloc(dsize); h_matrix_final_gpu = (int*)malloc(dsize); //Set all matrices to 0 , Not needed but used for testing memset(h_matrix_A, 0, dsize); memset(h_matrix_B, 0, dsize); memset(h_matrix_C, 0, dsize); //Allocate memory for 
device Matrix cudaMalloc((void **)(&d_matrix_A), dsize); cudaMalloc((void **)(&d_matrix_B), dsize); cudaMalloc((void **)(&d_matrix_C), dsize); //checkCudaErrors(cudaMalloc((void **)(&d_matrix_A), dsize)); //checkCudaErrors(cudaMalloc((void **)(&d_matrix_B), dsize)); //checkCudaErrors(cudaMalloc((void **)(&d_matrix_C), dsize)); initialization(h_matrix_A, h_matrix_B, width); add_matrix_cpu(h_matrix_A, h_matrix_B, h_matrix_C, width); if (booleanValue) { printf_matrix(h_matrix_A, width); printf_matrix(h_matrix_B, width); printf("\nThe results of CPU addition\n"); printf_matrix(h_matrix_C, width); } //copy the Matrices from Host to Device cudaMemcpy(d_matrix_A, h_matrix_A, dsize, cudaMemcpyHostToDevice); cudaMemcpy(d_matrix_B, h_matrix_B, dsize, cudaMemcpyHostToDevice); /*checkCudaErrors(cudaMemcpy(d_matrix_A, h_matrix_A, dsize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_matrix_B, h_matrix_B, dsize, cudaMemcpyHostToDevice));*/ dim3 dimBlock(blockSize, blockSize); dim3 dimGrid(ceil(((double)width) / dimBlock.x), ceil(((double)width) / dimBlock.y)); //GPU add_matrix_gpu << < dimGrid, dimBlock >> >(d_matrix_A, d_matrix_B, d_matrix_C, width); // Copy result from device to host cudaMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, cudaMemcpyDeviceToHost); //checkCudaErrors(cudaMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, cudaMemcpyDeviceToHost)); printf("GPU done Addition\n"); if (booleanValue) { printf("\nThe results of GPU Addition\n"); printf_matrix(h_matrix_final_gpu, width); } printf("\nVerifying\n"); verify(h_matrix_C, h_matrix_final_gpu, width); // Clean up memory freeMemory_h(h_matrix_A, h_matrix_B, h_matrix_C, h_matrix_final_gpu); freeMemory_d(d_matrix_A, d_matrix_B, d_matrix_C); } void multiplicationMatrixMain() { int width = 0, blockSize = 0; int *h_matrix_A, *h_matrix_B, *h_matrix_C; int *d_matrix_A, *d_matrix_B, *d_matrix_C; int * h_matrix_final_gpu; int booleanValue = debugOn(); getWidth(width); getBlockSize(blockSize); printf("Matrix Size: %d * %d \nSize of Thread block: %d * %d", width, width, blockSize, blockSize); printf("\n\n"); printf("multiplying....\n"); //The size of all matrices size_t dsize = (width * width) * sizeof(int); //Allocate memory for matrices on host h_matrix_A = (int*)malloc(dsize); h_matrix_B = (int*)malloc(dsize); h_matrix_C = (int*)malloc(dsize); h_matrix_final_gpu = (int*)malloc(dsize); //Set all matrices to 0 , Not needed but used for testing memset(h_matrix_A, 0, dsize); memset(h_matrix_B, 0, dsize); memset(h_matrix_C, 0, dsize); //Allocate memory for device Matrix cudaMalloc((void **)(&d_matrix_A), dsize); cudaMalloc((void **)(&d_matrix_B), dsize); cudaMalloc((void **)(&d_matrix_C), dsize); /*checkCudaErrors(cudaMalloc((void **)(&d_matrix_A), dsize)); checkCudaErrors(cudaMalloc((void **)(&d_matrix_B), dsize)); checkCudaErrors(cudaMalloc((void **)(&d_matrix_C), dsize));*/ initializationM(h_matrix_A, h_matrix_B, width); matrixMulOnHost(h_matrix_A, h_matrix_B, h_matrix_C, width); if (booleanValue) { printf_matrix(h_matrix_A, width); printf_matrix(h_matrix_B, width); printf("\nThe results of CPU Multiplication\n"); printf_matrix(h_matrix_C, width); } //copy the Matrices from Host to Device cudaMemcpy(d_matrix_A, h_matrix_A, dsize, cudaMemcpyHostToDevice); cudaMemcpy(d_matrix_B, h_matrix_B, dsize, cudaMemcpyHostToDevice); /*checkCudaErrors(cudaMemcpy(d_matrix_A, h_matrix_A, dsize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_matrix_B, h_matrix_B, dsize, cudaMemcpyHostToDevice));*/ dim3 dimBlock(blockSize, blockSize); dim3 
dimGrid(ceil(((double)width) / dimBlock.x), ceil(((double)width) / dimBlock.y)); //GPU matrixMulKernel << < dimGrid, dimBlock >> >(d_matrix_A, d_matrix_B, d_matrix_C, width); // Copy result from device to host cudaMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, cudaMemcpyDeviceToHost); //checkCudaErrors(cudaMemcpy(h_matrix_final_gpu, d_matrix_C, dsize, cudaMemcpyDeviceToHost)); printf("GPU done Multiplying Matrices\n"); if (booleanValue) { printf("\nThe results of GPU Multiplication\n"); printf_matrix(h_matrix_final_gpu, width); } printf("\nVerifying\n"); verify(h_matrix_C, h_matrix_final_gpu, width); // Clean up memory freeMemory_h(h_matrix_A, h_matrix_B, h_matrix_C, h_matrix_final_gpu); freeMemory_d(d_matrix_A, d_matrix_B, d_matrix_C); } void freeMemory_h(int* h_matrix_A,int* h_matrix_B, int*h_matrix_C, int *h_matrix_final_gpu) { // Clean up memory free(h_matrix_A); free(h_matrix_B); free(h_matrix_C); free(h_matrix_final_gpu); } void freeMemory_d(int* d_matrix_A, int* d_matrix_B, int*d_matrix_C) { // Clean up memory cudaFree(d_matrix_A); cudaFree(d_matrix_B); cudaFree(d_matrix_C); //checkCudaErrors(cudaFree(d_matrix_A)); //checkCudaErrors(cudaFree(d_matrix_B)); //checkCudaErrors(cudaFree(d_matrix_C)); }
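This file comments out every checkCudaErrors() call because the CUDA samples' helper headers (helper_functions.h, helper_cuda.h) are also commented out at the top. A self-contained stand-in, assuming only the CUDA runtime, might look like this sketch:

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Abort with file/line context when a runtime call fails (hypothetical macro,
// a minimal replacement for the samples' checkCudaErrors()).
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// Usage, mirroring addMatrixMain():
//   CUDA_CHECK(cudaMalloc((void **)(&d_matrix_A), dsize));
//   CUDA_CHECK(cudaMemcpy(d_matrix_A, h_matrix_A, dsize, cudaMemcpyHostToDevice));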
6f369531ccc64426311ed0afdde6de060831ca09.hip
// !!! This is a file automatically generated by hipify!!! #include "../include/fft_related.cuh" #include "../core/include/cuda_helper.h" #include <hipfft.h> namespace fdsp { GPUArray<hipComplex> FourierTransform(const GPUArray<float>& array, int axis) { int rank = array.ndims; int *n = (int*)malloc(sizeof(int)*rank); std::vector<size_t> temp = array.GetDimensionSizes(); for(int i = 0; i < rank; i++) n[i] = (int)temp[i]; int size = array.GetSize(); GPUArray<hipComplex> out(array.GetDimensionSizes()); hipfftHandle plan; CHECK_CUFFT(hipfftCreate(&plan)); switch (axis) { case -1: { CHECK_CUFFT(hipfftPlanMany(&plan, rank, (int*)n, NULL, 1, size, NULL, 1, size, HIPFFT_R2C, 1)); break; } case 0: CHECK_CUFFT(hipfftPlanMany(&plan, rank, (int*)n, (int*)n, n[0], 1, (int*)n, n[0], 1, HIPFFT_R2C, n[rank-1])); break; default: break; } CHECK_CUFFT(hipfftExecR2C(plan, const_cast<float*>(array.GetPointerToArrayConst()), out.GetPointerToArray())); CHECK(hipDeviceSynchronize()); CHECK_CUFFT(hipfftDestroy(plan)); free(n); return out; } }
6f369531ccc64426311ed0afdde6de060831ca09.cu
#include "../include/fft_related.cuh" #include "../core/include/cuda_helper.h" #include <cufft.h> namespace fdsp { GPUArray<cuComplex> FourierTransform(const GPUArray<float>& array, int axis) { int rank = array.ndims; int *n = (int*)malloc(sizeof(int)*rank); std::vector<size_t> temp = array.GetDimensionSizes(); for(int i = 0; i < rank; i++) n[i] = (int)temp[i]; int size = array.GetSize(); GPUArray<cuComplex> out(array.GetDimensionSizes()); cufftHandle plan; CHECK_CUFFT(cufftCreate(&plan)); switch (axis) { case -1: { CHECK_CUFFT(cufftPlanMany(&plan, rank, (int*)n, NULL, 1, size, NULL, 1, size, CUFFT_R2C, 1)); break; } case 0: CHECK_CUFFT(cufftPlanMany(&plan, rank, (int*)n, (int*)n, n[0], 1, (int*)n, n[0], 1, CUFFT_R2C, n[rank-1])); break; default: break; } CHECK_CUFFT(cufftExecR2C(plan, const_cast<float*>(array.GetPointerToArrayConst()), out.GetPointerToArray())); CHECK(cudaDeviceSynchronize()); CHECK_CUFFT(cufftDestroy(plan)); free(n); return out; } }
39b13113795b6569958dcf69b120074b04ad5978.hip
// !!! This is a file automatically generated by hipify!!! #include "KCUDASurfaceContainer.hh" #include <limits.h> #include <algorithm> #include <iostream> #include "KCUDABufferStreamer.hh" namespace KEMField { KCUDASurfaceContainer::KCUDASurfaceContainer(const KSurfaceContainer& container) : KSortedSurfaceContainer(container), KCUDAData(), fNBufferedElements(0), fShapeSize(0), fBoundarySize(0), fBasisSize(0), fDeviceShapeInfo(NULL), fDeviceShapeData(NULL), fDeviceBoundaryInfo(NULL), fDeviceBoundaryData(NULL), fDeviceBasisData(NULL) { // Acquire the maximum buffer sizes for the shape, boundary and basis policies KSurfaceSize<KShape> shapeSize; KSurfaceSize<KBoundary> boundarySize; KSurfaceSize<KBasis> basisSize; FlagGenerator flagGenerator; std::vector<unsigned short> shapes; std::vector<unsigned short> boundaries; for (unsigned int i=0;i<container.NumberOfSurfaceTypes();i++) { KSurfacePrimitive* sP = container.FirstSurfaceType(i); // Shape size (maximal) shapeSize.Reset(); shapeSize.SetSurface( sP->GetShape() ); KShapeAction<KSurfaceSize<KShape> >::ActOnShapeType( sP->GetID(), shapeSize ); if( shapeSize.size()>fShapeSize ) fShapeSize = shapeSize.size(); // Shape type (triangle, rectangle, line segment, conic section) KShapeAction<FlagGenerator>::ActOnShapeType( sP->GetID(), flagGenerator ); if( std::find(shapes.begin(),shapes.end(),sP->GetID().ShapeID) == shapes.end() ) { shapes.push_back(sP->GetID().ShapeID); } // Boundary size (maximal) boundarySize.Reset(); boundarySize.SetSurface( sP->GetBoundary() ); KBoundaryAction<KSurfaceSize<KBoundary> >::ActOnBoundaryType( sP->GetID(),boundarySize ); if( boundarySize.size()>fBoundarySize ) fBoundarySize = boundarySize.size(); // Boundary type (Dirichlet, Neumann) KBoundaryAction<FlagGenerator>::ActOnBoundaryType( sP->GetID(),flagGenerator ); if( std::find(boundaries.begin(),boundaries.end(),sP->GetID().BoundaryID) == boundaries.end() ) { boundaries.push_back(sP->GetID().BoundaryID); } // Basis size basisSize.Reset(); basisSize.SetSurface(sP->GetBasis()); KBasisAction<KSurfaceSize<KBasis> >::ActOnBasisType(sP->GetID(),basisSize); if( basisSize.size()>fBasisSize ) fBasisSize = basisSize.size(); } } KCUDASurfaceContainer::~KCUDASurfaceContainer() { if (fDeviceShapeInfo) hipFree(fDeviceShapeInfo); if (fDeviceShapeData) hipFree(fDeviceShapeData); if (fDeviceBoundaryInfo) hipFree(fDeviceBoundaryInfo); if (fDeviceBoundaryData) hipFree(fDeviceBoundaryData); if (fDeviceBasisData) hipFree(fDeviceBasisData); } void KCUDASurfaceContainer::BuildCUDAObjects() { // First, we fill a vector with shape data unsigned int nDummy = 0; unsigned int tmp = fNLocal - (size()%fNLocal); if (tmp != fNLocal) nDummy += tmp; fNBufferedElements = size() + nDummy; fShapeInfo.resize(fNBufferedElements,-1); fShapeData.resize(fShapeSize*fNBufferedElements,0.); KCUDABufferPolicyStreamer<KShape> shapeStreamer; shapeStreamer.SetBufferSize(fShapeSize); shapeStreamer.SetBuffer(&fShapeData[0]); for( unsigned int i=0; i<size(); i++ ) { fShapeInfo[i] = at(i)->GetID().ShapeID; shapeStreamer.SetSurfacePolicy(at(i)->GetShape()); KShapeAction<KCUDABufferPolicyStreamer<KShape> >::ActOnShapeType(at(i)->GetID(),shapeStreamer); } // Next, we fill a vector with boundary information fBoundaryInfo.resize(3*NUniqueBoundaries()+2); fBoundaryInfo[0] = size(); fBoundaryInfo[1] = NUniqueBoundaries(); for( unsigned int i=0; i<NUniqueBoundaries(); i++ ) { fBoundaryInfo[2 + i*3] = size(i); fBoundaryInfo[2 + i*3 + 1] = BoundaryType(i); fBoundaryInfo[2 + i*3 + 2] = IndexOfFirstSurface(i); } // Next, we fill a 
vector with the actual boundary data fBoundaryData.resize(fBoundarySize*NUniqueBoundaries()); KCUDABufferPolicyStreamer<KBoundary> boundaryStreamer; boundaryStreamer.SetBufferSize(fBoundarySize); boundaryStreamer.SetBuffer(&fBoundaryData[0]); for( unsigned int i=0; i<NUniqueBoundaries(); i++ ) { unsigned int index = IndexOfFirstSurface(i); boundaryStreamer.SetSurfacePolicy(at(index)->GetBoundary()); KBoundaryAction<KCUDABufferPolicyStreamer<KBoundary> >::ActOnBoundaryType(at(index)->GetID(),boundaryStreamer); } // Finally, we fill a vector with the basis data fBasisData.resize(fBasisSize*fNBufferedElements,0.); KCUDABufferPolicyStreamer<KBasis> basisStreamer; basisStreamer.SetBufferSize(fBasisSize); basisStreamer.SetBuffer(&fBasisData[0]); for( unsigned int i=0;i<size();i++ ) { basisStreamer.SetSurfacePolicy(at(i)->GetBasis()); KBasisAction<KCUDABufferPolicyStreamer<KBasis> >::ActOnBasisType(at(i)->GetID(),basisStreamer); } // Now that the data is in array form, we can allocate the device memory hipMalloc((void**) &fDeviceShapeInfo, fShapeInfo.size()*sizeof(short)); hipMalloc((void**) &fDeviceShapeData, fShapeData.size()*sizeof(CU_TYPE)); hipMalloc((void**) &fDeviceBoundaryInfo, fBoundaryInfo.size()*sizeof(int)); hipMalloc((void**) &fDeviceBoundaryData, fBoundaryData.size()*sizeof(CU_TYPE)); hipMalloc((void**) &fDeviceBasisData, fBasisData.size()*sizeof(CU_TYPE)); // Write to device memory hipMemcpy(fDeviceShapeInfo, &fShapeInfo[0], fShapeInfo.size()*sizeof(short), hipMemcpyHostToDevice ); hipMemcpy(fDeviceShapeData, &fShapeData[0], fShapeData.size()*sizeof(CU_TYPE), hipMemcpyHostToDevice ); hipMemcpy(fDeviceBoundaryInfo, &fBoundaryInfo[0], fBoundaryInfo.size()*sizeof(int), hipMemcpyHostToDevice ); hipMemcpy(fDeviceBoundaryData, &fBoundaryData[0], fBoundaryData.size()*sizeof(CU_TYPE), hipMemcpyHostToDevice ); hipMemcpy(fDeviceBasisData, &fBasisData[0], fBasisData.size()*sizeof(CU_TYPE), hipMemcpyHostToDevice ); fIsConstructed = true; } void KCUDASurfaceContainer::ReadBasisData() { hipMemcpy( &fBasisData[0], fDeviceBasisData, fBasisData.size()*sizeof(CU_TYPE), hipMemcpyDeviceToHost ); KCUDABufferPolicyStreamer<KBasis> basisStreamer; basisStreamer.SetToRead(); basisStreamer.SetBufferSize(fBasisSize); basisStreamer.SetBuffer(&fBasisData[0]); for( unsigned int i=0;i<size();i++ ) { basisStreamer.SetSurfacePolicy(at(i)->GetBasis()); KBasisAction<KCUDABufferPolicyStreamer<KBasis> >::ActOnBasisType(at(i)->GetID(),basisStreamer); } } }
39b13113795b6569958dcf69b120074b04ad5978.cu
#include "KCUDASurfaceContainer.hh" #include <limits.h> #include <algorithm> #include <iostream> #include "KCUDABufferStreamer.hh" namespace KEMField { KCUDASurfaceContainer::KCUDASurfaceContainer(const KSurfaceContainer& container) : KSortedSurfaceContainer(container), KCUDAData(), fNBufferedElements(0), fShapeSize(0), fBoundarySize(0), fBasisSize(0), fDeviceShapeInfo(NULL), fDeviceShapeData(NULL), fDeviceBoundaryInfo(NULL), fDeviceBoundaryData(NULL), fDeviceBasisData(NULL) { // Acquire the maximum buffer sizes for the shape, boundary and basis policies KSurfaceSize<KShape> shapeSize; KSurfaceSize<KBoundary> boundarySize; KSurfaceSize<KBasis> basisSize; FlagGenerator flagGenerator; std::vector<unsigned short> shapes; std::vector<unsigned short> boundaries; for (unsigned int i=0;i<container.NumberOfSurfaceTypes();i++) { KSurfacePrimitive* sP = container.FirstSurfaceType(i); // Shape size (maximal) shapeSize.Reset(); shapeSize.SetSurface( sP->GetShape() ); KShapeAction<KSurfaceSize<KShape> >::ActOnShapeType( sP->GetID(), shapeSize ); if( shapeSize.size()>fShapeSize ) fShapeSize = shapeSize.size(); // Shape type (triangle, rectangle, line segment, conic section) KShapeAction<FlagGenerator>::ActOnShapeType( sP->GetID(), flagGenerator ); if( std::find(shapes.begin(),shapes.end(),sP->GetID().ShapeID) == shapes.end() ) { shapes.push_back(sP->GetID().ShapeID); } // Boundary size (maximal) boundarySize.Reset(); boundarySize.SetSurface( sP->GetBoundary() ); KBoundaryAction<KSurfaceSize<KBoundary> >::ActOnBoundaryType( sP->GetID(),boundarySize ); if( boundarySize.size()>fBoundarySize ) fBoundarySize = boundarySize.size(); // Boundary type (Dirichlet, Neumann) KBoundaryAction<FlagGenerator>::ActOnBoundaryType( sP->GetID(),flagGenerator ); if( std::find(boundaries.begin(),boundaries.end(),sP->GetID().BoundaryID) == boundaries.end() ) { boundaries.push_back(sP->GetID().BoundaryID); } // Basis size basisSize.Reset(); basisSize.SetSurface(sP->GetBasis()); KBasisAction<KSurfaceSize<KBasis> >::ActOnBasisType(sP->GetID(),basisSize); if( basisSize.size()>fBasisSize ) fBasisSize = basisSize.size(); } } KCUDASurfaceContainer::~KCUDASurfaceContainer() { if (fDeviceShapeInfo) cudaFree(fDeviceShapeInfo); if (fDeviceShapeData) cudaFree(fDeviceShapeData); if (fDeviceBoundaryInfo) cudaFree(fDeviceBoundaryInfo); if (fDeviceBoundaryData) cudaFree(fDeviceBoundaryData); if (fDeviceBasisData) cudaFree(fDeviceBasisData); } void KCUDASurfaceContainer::BuildCUDAObjects() { // First, we fill a vector with shape data unsigned int nDummy = 0; unsigned int tmp = fNLocal - (size()%fNLocal); if (tmp != fNLocal) nDummy += tmp; fNBufferedElements = size() + nDummy; fShapeInfo.resize(fNBufferedElements,-1); fShapeData.resize(fShapeSize*fNBufferedElements,0.); KCUDABufferPolicyStreamer<KShape> shapeStreamer; shapeStreamer.SetBufferSize(fShapeSize); shapeStreamer.SetBuffer(&fShapeData[0]); for( unsigned int i=0; i<size(); i++ ) { fShapeInfo[i] = at(i)->GetID().ShapeID; shapeStreamer.SetSurfacePolicy(at(i)->GetShape()); KShapeAction<KCUDABufferPolicyStreamer<KShape> >::ActOnShapeType(at(i)->GetID(),shapeStreamer); } // Next, we fill a vector with boundary information fBoundaryInfo.resize(3*NUniqueBoundaries()+2); fBoundaryInfo[0] = size(); fBoundaryInfo[1] = NUniqueBoundaries(); for( unsigned int i=0; i<NUniqueBoundaries(); i++ ) { fBoundaryInfo[2 + i*3] = size(i); fBoundaryInfo[2 + i*3 + 1] = BoundaryType(i); fBoundaryInfo[2 + i*3 + 2] = IndexOfFirstSurface(i); } // Next, we fill a vector with the actual boundary data 
fBoundaryData.resize(fBoundarySize*NUniqueBoundaries()); KCUDABufferPolicyStreamer<KBoundary> boundaryStreamer; boundaryStreamer.SetBufferSize(fBoundarySize); boundaryStreamer.SetBuffer(&fBoundaryData[0]); for( unsigned int i=0; i<NUniqueBoundaries(); i++ ) { unsigned int index = IndexOfFirstSurface(i); boundaryStreamer.SetSurfacePolicy(at(index)->GetBoundary()); KBoundaryAction<KCUDABufferPolicyStreamer<KBoundary> >::ActOnBoundaryType(at(index)->GetID(),boundaryStreamer); } // Finally, we fill a vector with the basis data fBasisData.resize(fBasisSize*fNBufferedElements,0.); KCUDABufferPolicyStreamer<KBasis> basisStreamer; basisStreamer.SetBufferSize(fBasisSize); basisStreamer.SetBuffer(&fBasisData[0]); for( unsigned int i=0;i<size();i++ ) { basisStreamer.SetSurfacePolicy(at(i)->GetBasis()); KBasisAction<KCUDABufferPolicyStreamer<KBasis> >::ActOnBasisType(at(i)->GetID(),basisStreamer); } // Now that the data is in array form, we can allocate the device memory cudaMalloc((void**) &fDeviceShapeInfo, fShapeInfo.size()*sizeof(short)); cudaMalloc((void**) &fDeviceShapeData, fShapeData.size()*sizeof(CU_TYPE)); cudaMalloc((void**) &fDeviceBoundaryInfo, fBoundaryInfo.size()*sizeof(int)); cudaMalloc((void**) &fDeviceBoundaryData, fBoundaryData.size()*sizeof(CU_TYPE)); cudaMalloc((void**) &fDeviceBasisData, fBasisData.size()*sizeof(CU_TYPE)); // Write to device memory cudaMemcpy(fDeviceShapeInfo, &fShapeInfo[0], fShapeInfo.size()*sizeof(short), cudaMemcpyHostToDevice ); cudaMemcpy(fDeviceShapeData, &fShapeData[0], fShapeData.size()*sizeof(CU_TYPE), cudaMemcpyHostToDevice ); cudaMemcpy(fDeviceBoundaryInfo, &fBoundaryInfo[0], fBoundaryInfo.size()*sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy(fDeviceBoundaryData, &fBoundaryData[0], fBoundaryData.size()*sizeof(CU_TYPE), cudaMemcpyHostToDevice ); cudaMemcpy(fDeviceBasisData, &fBasisData[0], fBasisData.size()*sizeof(CU_TYPE), cudaMemcpyHostToDevice ); fIsConstructed = true; } void KCUDASurfaceContainer::ReadBasisData() { cudaMemcpy( &fBasisData[0], fDeviceBasisData, fBasisData.size()*sizeof(CU_TYPE), cudaMemcpyDeviceToHost ); KCUDABufferPolicyStreamer<KBasis> basisStreamer; basisStreamer.SetToRead(); basisStreamer.SetBufferSize(fBasisSize); basisStreamer.SetBuffer(&fBasisData[0]); for( unsigned int i=0;i<size();i++ ) { basisStreamer.SetSurfacePolicy(at(i)->GetBasis()); KBasisAction<KCUDABufferPolicyStreamer<KBasis> >::ActOnBasisType(at(i)->GetID(),basisStreamer); } } }
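BuildCUDAObjects() pairs each cudaMalloc with a cudaMemcpy from a host std::vector; deriving the byte count once from the vector being uploaded keeps the allocation and the copy from drifting apart. An illustrative helper (a sketch, not part of KEMField):

#include <vector>
#include <cuda_runtime.h>

// Allocate a device buffer sized exactly to the host vector and upload it.
template <typename T>
T* UploadVector(const std::vector<T>& host) {
    T* dev = NULL;
    cudaMalloc((void**)&dev, host.size() * sizeof(T));
    cudaMemcpy(dev, host.data(), host.size() * sizeof(T), cudaMemcpyHostToDevice);
    return dev;  // caller owns the buffer; release with cudaFree()
}
// e.g. fDeviceBoundaryData = UploadVector(fBoundaryData);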
0caedf7d98c2f9b61948317900083fe82f8e8fb4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" namespace oneflow { namespace { template<typename T> __global__ void FakeQuantizationSymmetric(const T* in_ptr, const T* scale_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; T lower_bound = -upper_bound - 1; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T out = nearbyint(in_ptr[gid] / scale); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? lower_bound : out; out_ptr[gid] = out * scale; gid += step; } } template<typename T> __global__ void FakeQuantizationAffine(const T* in_ptr, const T* scale_ptr, const T* zero_point_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit)) - 1; T lower_bound = 0; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T zero_point = zero_point_ptr[scale_idx]; T out = nearbyint(in_ptr[gid] / scale + zero_point); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? lower_bound : out; out_ptr[gid] = (out - zero_point) * scale; gid += step; } } template<typename T> __global__ void FakeQuantizationCambricon(const T* in_ptr, const T* shift, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; T lower_bound = -upper_bound - 1; T scale = static_cast<T>(pow(2.0, static_cast<int32_t>(shift[0]))); while (gid < elements) { T out = nearbyint(in_ptr[gid] / scale); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? 
lower_bound : out; out_ptr[gid] = out * scale; gid += step; } } } // namespace template<typename T> class GpuFakeQuantizationKernel final : public user_op::OpKernel { public: GpuFakeQuantizationKernel() = default; ~GpuFakeQuantizationKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0); const user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme"); const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit"); const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula"); const int64_t elements = in->shape().elem_cnt(); const int64_t panel_size = in->shape().Count(1); const int64_t scale_size = scale->shape().elem_cnt(); // round to even auto origin_round_mode = std::fegetround(); std::fesetround(FE_TONEAREST); if (quantization_formula == "google") { if (quantization_scheme == "symmetric") { RUN_CUDA_KERNEL((FakeQuantizationSymmetric<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } else { // quantization_scheme == "affine" RUN_CUDA_KERNEL((FakeQuantizationAffine<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), zero_point->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } } else if (quantization_formula == "cambricon") { RUN_CUDA_KERNEL((FakeQuantizationCambricon<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } else { UNIMPLEMENTED(); } std::fesetround(origin_round_mode); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_FAKE_QUANTIZATION_KERNEL(dtype) \ REGISTER_USER_KERNEL("fake_quantization") \ .SetCreateFn<GpuFakeQuantizationKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) REGISTER_FAKE_QUANTIZATION_KERNEL(float); REGISTER_FAKE_QUANTIZATION_KERNEL(double); } // namespace oneflow
0caedf7d98c2f9b61948317900083fe82f8e8fb4.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" namespace oneflow { namespace { template<typename T> __global__ void FakeQuantizationSymmetric(const T* in_ptr, const T* scale_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; T lower_bound = -upper_bound - 1; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T out = nearbyint(in_ptr[gid] / scale); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? lower_bound : out; out_ptr[gid] = out * scale; gid += step; } } template<typename T> __global__ void FakeQuantizationAffine(const T* in_ptr, const T* scale_ptr, const T* zero_point_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit)) - 1; T lower_bound = 0; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T zero_point = zero_point_ptr[scale_idx]; T out = nearbyint(in_ptr[gid] / scale + zero_point); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? lower_bound : out; out_ptr[gid] = (out - zero_point) * scale; gid += step; } } template<typename T> __global__ void FakeQuantizationCambricon(const T* in_ptr, const T* shift, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; T lower_bound = -upper_bound - 1; T scale = static_cast<T>(pow(2.0, static_cast<int32_t>(shift[0]))); while (gid < elements) { T out = nearbyint(in_ptr[gid] / scale); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? 
lower_bound : out; out_ptr[gid] = out * scale; gid += step; } } } // namespace template<typename T> class GpuFakeQuantizationKernel final : public user_op::OpKernel { public: GpuFakeQuantizationKernel() = default; ~GpuFakeQuantizationKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0); const user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme"); const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit"); const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula"); const int64_t elements = in->shape().elem_cnt(); const int64_t panel_size = in->shape().Count(1); const int64_t scale_size = scale->shape().elem_cnt(); // round to even auto origin_round_mode = std::fegetround(); std::fesetround(FE_TONEAREST); if (quantization_formula == "google") { if (quantization_scheme == "symmetric") { RUN_CUDA_KERNEL((FakeQuantizationSymmetric<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } else { // quantization_scheme == "affine" RUN_CUDA_KERNEL((FakeQuantizationAffine<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), zero_point->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } } else if (quantization_formula == "cambricon") { RUN_CUDA_KERNEL((FakeQuantizationCambricon<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } else { UNIMPLEMENTED(); } std::fesetround(origin_round_mode); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_FAKE_QUANTIZATION_KERNEL(dtype) \ REGISTER_USER_KERNEL("fake_quantization") \ .SetCreateFn<GpuFakeQuantizationKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) REGISTER_FAKE_QUANTIZATION_KERNEL(float); REGISTER_FAKE_QUANTIZATION_KERNEL(double); } // namespace oneflow
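For reference, the symmetric scheme above rounds in/scale to the nearest integer (round-to-even, matching the FE_TONEAREST mode set in Compute()), clamps to the signed quantization_bit range, and rescales. A host-side sketch of the same arithmetic (for checking the kernel's math; not part of the OneFlow sources):

#include <cmath>

float FakeQuantSymmetricRef(float x, float scale, int quantization_bit) {
    const float upper = (float)std::pow(2.0, quantization_bit - 1) - 1.0f;  // 127 for 8 bits
    const float lower = -upper - 1.0f;                                      // -128 for 8 bits
    float q = std::nearbyint(x / scale);  // honors the current rounding mode
    if (q > upper) q = upper;
    if (q < lower) q = lower;
    return q * scale;                     // dequantize back to float
}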
d30ff368b61cccaccf817ceb8b34f727fd4fd7e1.hip
// !!! This is a file automatically generated by hipify!!! #include "config.h" #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; //#define cfd_SUPER_BLOCKS_PER_SM 5 //const int BLOCK_SIZE = 256; const int cfd_nBlksPerCluster = 16; const int cfd_nAtom = BLOCK_SIZE * MSIZE; const int cfd_maxNeighbors = 8; texture<float,1,hipReadModeElementType> tex_my; texture<float,1,hipReadModeElementType> tex_mz; texture<float,1,hipReadModeElementType> tex_energy; inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom, int* neighborList, int blockSz) { //create non-uniform data sharing //but avoid that tasks sharing the same data are neighbor tasks by randomization vector<int> atomInds(nAtom); vector<int> blkInds((nAtom+blockSz-1)/blockSz); for(int i=0; i<blkInds.size(); ++i) blkInds[i] = i; random_shuffle(blkInds.begin(), blkInds.end()); int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int)); for(int i=0; i<blkInds.size(); ++i) blkOrder[i] = blkInds[i]; int j=0; for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it) { int blkInd = *it; for(int i=0; i<blockSz; ++i) atomInds[j++] = blkInd*blockSz + i; } int superBlockSz = blockSz * cfd_nBlksPerCluster; // Build Neighbor List for (int i = 0; i < nAtom; i++) { int start = i - i%superBlockSz; //difference is here //int end = i + (superBlockSz - i%superBlockSz)-1; int nNeighbors = 0; do { int j = start + rand() % superBlockSz; if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j]; nNeighbors ++; } while(nNeighbors<cfd_maxNeighbors); } return blkOrder; } #define GAMMA 1.4f #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define NDIM 3 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) __host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __host__ __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __host__ __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; 
fc_density_energy.z = velocity.z*de_p; } void check_cfd(int nelr, int* elements_surrounding_elements, float* normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); //const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int i=0;i<MSIZE*BLOCK_SIZE;i++){ int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i]; float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = my[i]; momentum_i.z = mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... 
| normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb]; momentum_nb.x = mx[nb]; momentum_nb.y = my[nb]; momentum_nb.z = mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } /*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\ ((pow((fluxes[i + 
VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/ if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\ ((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01))) {printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\ fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\ fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\ fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\ fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy); return;} } printf("GOOD! passed!\n"); return; } __global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float* normals, float* density, const float* __restrict__ mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i]; float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = tex1Dfetch(tex_my,i);//my[i]; momentum_i.z = tex1Dfetch(tex_mz,i); //mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for neighbor 1, ... 
| Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... | normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb]; momentum_nb.x = mx[nb]; momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb]; momentum_nb.z = tex1Dfetch(tex_mz,nb);//mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + 
VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; //if (threadIdx.x==0) atomicAdd(d_flag,1); } int main(int argc, char **argv) { srand(2013); // Allocate problem data on host //posVecType* position; //forceVecType* force; float *density; float *mx; float *my; float *mz; float *density_energy; float *normals; float *fluxes; int* cfd_neighborList; hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float)); hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); // Allocate device memory for position and force //forceVecType* d_force; //posVecType* d_position; float *d_density; float *d_mx; float *d_my; float *d_mz; float *d_density_energy; float *d_normals; float *d_fluxes; hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float)); hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float)); //hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType)); // Allocate device memory for neighbor list int* d_cfd_neighborList; hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); //cout << "Initializing test problem (this can take several " // "minutes for large problems)\n"; // Initialize positions -- random distribution in cubic domain // domainEdge constant specifies edge length for (int i = 0; i < cfd_nAtom; i++) { density[i] = (float)(drand48()); density_energy[i] = (float)(drand48() ); mx[i] = (float)(drand48() ); my[i] = (float)(drand48() ); mz[i] = (float)(drand48() ); /* density[i] = 1.1+i*0.01; density_energy[i] = 1.1+i*0.01; mx[i] = 1.1+i*0.01; my[i] = 1.1+i*0.01; mz[i] = 1.1+i*0.01; */ } for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i) normals[i] = (float)(drand48()); cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE); hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice); // Copy data to GPU hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice); hipSetDeviceFlags(hipDeviceMapHost); int *flag_cfd,*d_flag_cfd; hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped); hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0); hipBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float)); hipBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float)); hipBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float)); int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE; for(int i = 0; i <5; i++) { hipLaunchKernelGGL(( cfd_kernel), 
dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy, d_fluxes,d_flag_cfd); } hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); for(int i = 0; i <ITERATIONS; i++) { hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy, d_fluxes,d_flag_cfd); } hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time/ITERATIONS << endl; hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost); //check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes); //TODO:verified on small inputs /* ifstream fluxesF("../org/fluxes.txt"); for(int i=0; i<cfd_nAtom*NVAR; ++i) { float f; fluxesF >> f; if(abs(f - fluxes[i]) > 0.001) { fprintf(stderr, "Test failed! i = %d\n", i); return 1; } }*/ // printf("Test passed!\n"); // fluxesF.close(); return 0; }
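// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original benchmark): none of the runtime
// calls in this file -- or in the CUDA twin that follows -- check their return
// codes. A minimal checking macro, shown CUDA-flavored (the HIP spelling is a
// mechanical rename); the macro name CHECK_RT is invented for illustration.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#define CHECK_RT(call)                                                      \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "runtime error '%s' at %s:%d\n",                \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)
// Usage sketch: CHECK_RT(cudaMemcpy(d_mx, mx, bytes, cudaMemcpyHostToDevice));
// and, after a kernel launch: CHECK_RT(cudaGetLastError());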
d30ff368b61cccaccf817ceb8b34f727fd4fd7e1.cu
#include "config.h" #include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; //#define cfd_SUPER_BLOCKS_PER_SM 5 //const int BLOCK_SIZE = 256; const int cfd_nBlksPerCluster = 16; const int cfd_nAtom = BLOCK_SIZE * MSIZE; const int cfd_maxNeighbors = 8; texture<float,1,cudaReadModeElementType> tex_my; texture<float,1,cudaReadModeElementType> tex_mz; texture<float,1,cudaReadModeElementType> tex_energy; inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom, int* neighborList, int blockSz) { //create non-uniform data sharing //but avoid that tasks sharing the same data are neighbor tasks by randomization vector<int> atomInds(nAtom); vector<int> blkInds((nAtom+blockSz-1)/blockSz); for(int i=0; i<blkInds.size(); ++i) blkInds[i] = i; random_shuffle(blkInds.begin(), blkInds.end()); int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int)); for(int i=0; i<blkInds.size(); ++i) blkOrder[i] = blkInds[i]; int j=0; for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it) { int blkInd = *it; for(int i=0; i<blockSz; ++i) atomInds[j++] = blkInd*blockSz + i; } int superBlockSz = blockSz * cfd_nBlksPerCluster; // Build Neighbor List for (int i = 0; i < nAtom; i++) { int start = i - i%superBlockSz; //difference is here //int end = i + (superBlockSz - i%superBlockSz)-1; int nNeighbors = 0; do { int j = start + rand() % superBlockSz; if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j]; nNeighbors ++; } while(nNeighbors<cfd_maxNeighbors); } return blkOrder; } #define GAMMA 1.4f #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define NDIM 3 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) __host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __host__ __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __host__ __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } void check_cfd(int nelr, int* 
elements_surrounding_elements, float* normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); //const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int i=0;i<MSIZE*BLOCK_SIZE;i++){ int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i]; float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = my[i]; momentum_i.z = mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... 
| normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb]; momentum_nb.x = mx[nb]; momentum_nb.y = my[nb]; momentum_nb.z = mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } /*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\ ((pow((fluxes[i + 
VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/ if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\ ((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01))) {printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\ fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\ fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\ fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\ fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy); return;} } printf("GOOD! passed!\n"); return; } __global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float* normals, float* density, const float* __restrict__ mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i]; float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = tex1Dfetch(tex_my,i);//my[i]; momentum_i.z = tex1Dfetch(tex_mz,i); //mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for neighbor 1, ... 
| Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... | normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb]; momentum_nb.x = mx[nb]; momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb]; momentum_nb.z = tex1Dfetch(tex_mz,nb);//mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + 
VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; //if (threadIdx.x==0) atomicAdd(d_flag,1); } int main(int argc, char **argv) { srand(2013); // Allocate problem data on host //posVecType* position; //forceVecType* force; float *density; float *mx; float *my; float *mz; float *density_energy; float *normals; float *fluxes; int* cfd_neighborList; cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float)); cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); // Allocate device memory for position and force //forceVecType* d_force; //posVecType* d_position; float *d_density; float *d_mx; float *d_my; float *d_mz; float *d_density_energy; float *d_normals; float *d_fluxes; cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float)); cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float)); //cudaMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType)); // Allocate device memory for neighbor list int* d_cfd_neighborList; cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); //cout << "Initializing test problem (this can take several " // "minutes for large problems)\n"; // Initialize positions -- random distribution in cubic domain // domainEdge constant specifies edge length for (int i = 0; i < cfd_nAtom; i++) { density[i] = (float)(drand48()); density_energy[i] = (float)(drand48() ); mx[i] = (float)(drand48() ); my[i] = (float)(drand48() ); mz[i] = (float)(drand48() ); /* density[i] = 1.1+i*0.01; density_energy[i] = 1.1+i*0.01; mx[i] = 1.1+i*0.01; my[i] = 1.1+i*0.01; mz[i] = 1.1+i*0.01; */ } for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i) normals[i] = (float)(drand48()); cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE); cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice); // Copy data to GPU cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice); cudaSetDeviceFlags(cudaDeviceMapHost); int *flag_cfd,*d_flag_cfd; cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped); cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0); cudaBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float)); cudaBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float)); cudaBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float)); int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE; for(int i = 0; i <5; i++) { 
cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy, d_fluxes,d_flag_cfd); } cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); for(int i = 0; i <ITERATIONS; i++) { cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy, d_fluxes,d_flag_cfd); } cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time/ITERATIONS << endl; cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost); //check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes); //TODO:verified on small inputs /* ifstream fluxesF("../org/fluxes.txt"); for(int i=0; i<cfd_nAtom*NVAR; ++i) { float f; fluxesF >> f; if(abs(f - fluxes[i]) > 0.001) { fprintf(stderr, "Test failed! i = %d\n", i); return 1; } }*/ // printf("Test passed!\n"); // fluxesF.close(); return 0; }
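// ---------------------------------------------------------------------------
// Editor's sketch (hedged, not from the original source): the texture
// references tex_my/tex_mz/tex_energy bound above use the legacy API that was
// deprecated and later removed from CUDA. The bindless replacement wraps the
// same linear device pointer in a cudaTextureObject_t; make_float_tex is an
// invented helper name, and error checking is omitted for brevity.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
static cudaTextureObject_t make_float_tex(float* d_ptr, size_t n_elems) {
    cudaResourceDesc res = {};
    res.resType = cudaResourceTypeLinear;              // plain linear memory
    res.res.linear.devPtr = d_ptr;
    res.res.linear.desc = cudaCreateChannelDesc<float>();
    res.res.linear.sizeInBytes = n_elems * sizeof(float);
    cudaTextureDesc td = {};
    td.readMode = cudaReadModeElementType;             // raw float reads
    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &res, &td, nullptr);
    return tex;  // pass to the kernel; read with tex1Dfetch<float>(tex, i)
}
// Teardown sketch: cudaDestroyTextureObject(tex);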
87249530512ffe07d9a5af7103d90310327620f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* LIBRARY: NDT-NEW DEVELOPER TOOLS FILE: ndt_reduce.cu AUTHOR: Zehuan Wang DATE: 12/20/2012 Reduction tools ************************************************************************** ************************************************************************** ROUTINES: ndt_block_reduce_min *************************************************************************/ #include "ndt.h" #ifndef MIN #define MIN(x,y) ((x)>(y)?(y):(x)) #endif template<typename T> __device__ T ndt_block_reduce_min(T *ndt_i_pt_array, const int ndt_i_i_length) { int t = threadIdx.x; bool odd = false; extern __shared__ T array[]; if(t < ndt_i_i_length) { array[t] = ndt_i_pt_array[t]; } __syncthreads(); int i = ndt_i_i_length; while(i > 1) { if((i>>1)*2 < i) { i = i>>1; i++; odd = true; } else { i = i>>1; odd = false; } if((t < i && odd == false) || t < i-1) array[t] = MIN(array[t+i],array[t]); __syncthreads(); } return array[0]; }
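// ---------------------------------------------------------------------------
// Editor's sketch (assumptions flagged, not part of the library): because
// ndt_block_reduce_min declares extern __shared__ T array[], the caller must
// pass the dynamic shared-memory byte count at launch, and instantiating the
// template for more than one T in one translation unit can clash on the shared
// array's declared type. reduce_min_wrapper is a hypothetical single-block
// driver, CUDA-flavored:
// ---------------------------------------------------------------------------
template <typename T>
__global__ void reduce_min_wrapper(T* in, T* out, int n) {
    T m = ndt_block_reduce_min(in, n);   // device routine defined above
    if (threadIdx.x == 0) *out = m;      // one thread publishes the result
}
// Launch sketch: one block, >= n threads, n * sizeof(T) dynamic shared bytes:
//   reduce_min_wrapper<float><<<1, 256, 256 * sizeof(float)>>>(d_in, d_out, 256);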
87249530512ffe07d9a5af7103d90310327620f1.cu
/************************************************************************* LIBRARY: NDT-NEW DEVELOPER TOOLS FILE: ndt_reduce.cu AUTHOR: Zehuan Wang DATE: 12/20/2012 Reduction tools ************************************************************************** ************************************************************************** ROUTINES: ndt_block_reduce_min *************************************************************************/ #include "ndt.h" #ifndef MIN #define MIN(x,y) ((x)>(y)?(y):(x)) #endif template<typename T> __device__ T ndt_block_reduce_min(T *ndt_i_pt_array, const int ndt_i_i_length) { int t = threadIdx.x; bool odd = false; extern __shared__ T array[]; if(t < ndt_i_i_length) { array[t] = ndt_i_pt_array[t]; } __syncthreads(); int i = ndt_i_i_length; while(i > 1) { if((i>>1)*2 < i) { i = i>>1; i++; odd = true; } else { i = i>>1; odd = false; } if((t < i && odd == false) || t < i-1) array[t] = MIN(array[t+i],array[t]); __syncthreads(); } return array[0]; }
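// ---------------------------------------------------------------------------
// Editor's sketch (invented for illustration): a plain host-side reference of
// the same odd-aware tree reduction, useful for unit-testing the device
// routine above. It mirrors the halving exactly: when the active length i is
// odd it shrinks to i/2 + 1 and the unpaired middle element waits a round.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <vector>
template <typename T>
T host_block_reduce_min(const T* a, int n) {
    std::vector<T> v(a, a + n);          // working copy plays the shared array
    int i = n;
    while (i > 1) {
        bool odd = (i & 1) != 0;         // same test as (i>>1)*2 < i
        i = i / 2 + (odd ? 1 : 0);
        int active = odd ? i - 1 : i;    // element i-1 is carried over unpaired
        for (int t = 0; t < active; ++t) v[t] = std::min(v[t], v[t + i]);
    }
    return v[0];
}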
46d76ac4f6f84c4a01111bd4e232a24b001e6dae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __JOIN_KERNEL__ #define __JOIN_KERNEL__ #define MAX_LEVEL 9999 #define MAXDIMGRID 65535 #define MAXDIMBLOCK 1024 #define THREASHOLD 16 #define SHM_BUFF_SIZE 256 #define NESTED_BLOCK_SIZE 64 #define MAX_STREAM_NUM 16 #define WARP_SIZE 32 //#define GPU_PROFILE #ifdef GPU_PROFILE __device__ unsigned nested_calls = 0; __global__ void gpu_statistics(unsigned solution){ printf("====> GPU #%u - number of nested kernel calls:%u\n", solution, nested_calls); } #endif __device__ unsigned int gm_idx_pool[MAXDIMGRID*MAXDIMBLOCK/WARP_SIZE]; __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #if TORCH_HIP_VERSION <= 6000 __device__ inline double __shfl_down (double var, unsigned int src_lane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, src_lane, width); a.y = __shfl_down(a.y, src_lane, width); return *reinterpret_cast<double*>(&a); } #endif __inline__ __device__ FLOAT_T warp_reduce_sum(FLOAT_T val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2) val += __shfl_down(val, offset); return val; } __inline__ __device__ FLOAT_T block_reduce_sum(FLOAT_T val) { static __shared__ FLOAT_T shared[32]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; val = warp_reduce_sum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0; if (wid==0) val = warp_reduce_sum(val); //Final reduce within first warp return val; } __inline__ __device__ int binary_search() { return -1; } __global__ void join_bitmap_init_kernel( char *frontier, int node_num ) { /* row == tid, each thread processes one row in sparse matrix */ int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<node_num ) frontier[tid] = 1; } __global__ void join_kernel( int *ptr, int *indices, short *data, short *x, short *y, short *z, int node_num ) { /* row == tid, each thread processes one row in sparse matrix */ int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<node_num ) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; /* access neighbours */ for (int i=start; i<end; ++i) { int pos = binary_search(); if ( pos!=-1 ) { // } else { } } y[tid] = dot; } } __global__ void csr_join_thread_queue_kernel( int *ptr, int *indices, int *queue, unsigned int *queue_length, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num ) { /* row == tid, each thread processes one row in sparse matrix */ int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int frontier_size = *queue_length; if ( tid<frontier_size ) { int row = queue[tid]; FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[row]; int end = ptr[row+1]; /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[row] = dot; } } __global__ void csr_join_block_queue_kernel(int *ptr, int *indices, int *queue, unsigned int *queue_length, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num ) { int bid = blockIdx.x+blockIdx.y*gridDim.x; //*MAXDIMBLOCK + threadIdx.x; int row = 0; FLOAT_T dot = 0; unsigned int frontier_size = *queue_length; if ( bid<frontier_size ){ row = queue[bid]; // grab a work from queue, bid is queue index /* get neighbour range */ int start = ptr[row]; int end = ptr[row+1]; /* access neighbours */ for (int i=start+threadIdx.x; i<end; i+=blockDim.x) { dot += data[i] * x[indices[i]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) { y[row] = dot; } } } /* LOAD BALANCING THROUGH DELAYED BUFFER */ /* implements a delayed buffer in shared memory: - in phase 1, the threads access the nodes in the queue with a thread-based mapping (one node per thread) - in phase 2, the blocks access the nodes in the delayed-buffer in a block-based mapping (one neighbor per thread) */ __global__ void csr_join_shared_delayed_buffer_kernel( int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int t_idx = 0; // thread-based variable used to index inside the delayed buffer __shared__ int buffer[SHM_BUFF_SIZE]; //delayed buffer __shared__ unsigned int idx; //index within the delayed buffer if (threadIdx.x==0) idx = 0; __syncthreads(); // 1st phase - thread-based mapping if ( tid<node_num) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { // insert into delayed buffer t_idx = atomicInc(&idx, SHM_BUFF_SIZE); buffer[t_idx] = tid; //nested_calls++; } } __syncthreads(); // 2nd phase - each block processed all the elements in its shared memory buffer; each thread process a different neighbor #ifdef GPU_PROFILE if (tid==0 && idx!=0) { printf("In Block %d # delayed nodes : %d\n", blockIdx.x, idx); } #endif for (int i=0; i<idx; 
i++) { FLOAT_T dot = 0; int row = buffer[i]; //grab an element from the buffer // get neighbour range int start = ptr[row]; int end = ptr[row+1]; // access neighbors - one thread per neigbor; for (int eid=start+threadIdx.x; eid<end; eid+=blockDim.x){ dot += data[eid] * x[indices[eid]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) { y[row] = dot; } } } /* implements phase 1 of delayed buffer (buffer) in global memory: - in phase 1, the threads access the nodes in the queue with a thread-based mapping (one node per thread) - phase 2 must be implemented by separately invoking the "process_buffer" kernel */ __global__ void csr_join_global_delayed_buffer_kernel( int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int *buffer, unsigned int *idx, int node_num ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int t_idx = 0; // 1st phase if ( tid<node_num) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(idx, GM_BUFF_SIZE); buffer[t_idx] = tid; } } } /* LOAD BALANCING THROUGH DYNAMIC PARALLELISM */ /* Child kernel invoked by the dynamic parallelism implementation with multiple kernel calls This kernel processes the neighbors of a certain node. The starting and ending point (start and end parameters) within the edge array are given as parameter */ __global__ void join_process_neighbors( int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int start, int end, int row ) { int tid = blockIdx.x * blockDim.x + threadIdx.x + start; FLOAT_T dot = 0; if (tid < end) { dot += data[tid] * x[indices[tid]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) atomicAdd( &y[row], dot); } __global__ void csr_join_multidp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; hipStream_t s[MAX_STREAM_NUM]; for (int i=0; i<MAX_STREAM_NUM; ++i) { hipStreamCreateWithFlags(&s[i], hipStreamNonBlocking); } if ( tid<node_num ) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num<THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { #ifdef GPU_PROFILE nested_calls++; // printf("calling nested kernel for %d neighbors\n", edgeNum); #endif hipLaunchKernelGGL(( join_process_neighbors), dim3(edge_num/NESTED_BLOCK_SIZE+1), dim3(NESTED_BLOCK_SIZE),0,s[threadIdx.x%MAX_STREAM_NUM], indices, data, x, y, start, end, tid); } } } /* processes the elements in a buffer in block-based fashion. 
The buffer stores nodes ids in a queue */ __global__ void join_process_buffer( int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer, unsigned int buffer_size) { int bid = blockIdx.x; FLOAT_T dot = 0; if ( bid<buffer_size ) { // block-based mapping int row = buffer[bid]; //nodes processed by current block /* get neighbour range */ int start = ptr[row]; int end = ptr[row+1]; /* access neighbours */ for (int eid=start+threadIdx.x; eid<end; eid+=blockDim.x) { // eid is the identifier of the edge processed by the current thread dot += data[eid] * x[indices[eid]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) { y[row] = dot; } } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_singledp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer) { hipStream_t s; hipStreamCreateWithFlags(&s, hipStreamNonBlocking); unsigned per_block_buffer = GM_BUFF_SIZE/gridDim.x; // amount of the buffer available to each thread block unsigned block_offset = blockIdx.x * per_block_buffer; // block offset within the buffer unsigned int *block_index = &gm_idx_pool[blockIdx.x]; // index of each block within its sub-buffer int t_idx = 0; // used to access the buffer if (threadIdx.x == 0) *block_index = 0; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(block_index, per_block_buffer); buffer[t_idx+block_offset] = tid; } } __syncthreads(); //2nd phase - nested kernel call if (threadIdx.x==0 && *block_index!=0){ #ifdef GPU_PROFILE nested_calls++; #endif hipLaunchKernelGGL(( join_process_buffer), dim3(*block_index),dim3(NESTED_BLOCK_SIZE),0,s, ptr, indices, data, x, y, node_num, buffer+block_offset, *block_index); } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_warp_dp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer) { hipStream_t s[MAX_STREAM_NUM]; for (int i=0; i<MAX_STREAM_NUM; ++i) { hipStreamCreateWithFlags(&s[i], hipStreamNonBlocking); } int warpId = threadIdx.x / WARP_SIZE; int warpDim = blockDim.x / WARP_SIZE; int total_warp_num = gridDim.x * warpDim; unsigned per_warp_buffer = GM_BUFF_SIZE/total_warp_num; // amount of the buffer available to each thread block unsigned warp_offset = (blockIdx.x * warpDim + warpId) * per_warp_buffer; // block offset within the buffer unsigned int *warp_index = &gm_idx_pool[blockIdx.x * warpDim + warpId]; // index of each block within its sub-buffer int t_idx = 0; // used to access the buffer *warp_index = 0; int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(warp_index, per_warp_buffer); buffer[t_idx+warp_offset] = tid; } } __syncthreads(); //2nd phase - nested kernel call if (threadIdx.x%WARP_SIZE==0 && *warp_index!=0){ #ifdef GPU_PROFILE nested_calls++; #endif 
hipLaunchKernelGGL(( join_process_buffer), dim3(*warp_index),dim3(NESTED_BLOCK_SIZE),0,s[threadIdx.x%MAX_STREAM_NUM], ptr, indices, data, x, y, node_num, buffer+warp_offset, *warp_index); } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_block_dp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer) { hipStream_t s; hipStreamCreateWithFlags(&s, hipStreamNonBlocking); unsigned per_block_buffer = GM_BUFF_SIZE/gridDim.x; // amount of the buffer available to each thread block unsigned block_offset = blockIdx.x * per_block_buffer; // block offset within the buffer __shared__ int shm_buffer[MAXDIMBLOCK]; unsigned int *block_index = &gm_idx_pool[blockIdx.x]; // index of each block within its sub-buffer int t_idx = 0; // used to access the buffer if (threadIdx.x == 0) *block_index = 0; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(block_index, per_block_buffer); shm_buffer[t_idx] = tid; } } __syncthreads(); // dump shm_buffer to global buffer if (threadIdx.x<*block_index) { int idx = threadIdx.x + block_offset; buffer[idx] = shm_buffer[threadIdx.x]; } __syncthreads(); //2nd phase - nested kernel call if (threadIdx.x==0 && *block_index!=0){ #ifdef GPU_PROFILE nested_calls++; #endif hipLaunchKernelGGL(( join_process_buffer), dim3(*block_index),dim3(NESTED_BLOCK_SIZE),0,s, ptr, indices, data, x, y, node_num, buffer+block_offset, *block_index); } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_grid_dp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer, unsigned int *idx, unsigned int *count) { hipStream_t s; hipStreamCreateWithFlags(&s, hipStreamNonBlocking); unsigned per_block_buffer = GM_BUFF_SIZE/gridDim.x; // amount of the buffer available to each thread block __shared__ int shm_buffer[MAXDIMBLOCK]; __shared__ unsigned int block_index; // index of each block within its sub-buffer __shared__ int offset; int t_idx = 0; // used to access the buffer if (threadIdx.x == 0) block_index = 0; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(&block_index, per_block_buffer); shm_buffer[t_idx] = tid; } } __syncthreads(); // reorganize consolidation buffer for load balance (get offset per block) if (threadIdx.x==0) { offset = atomicAdd(idx, block_index); } __syncthreads(); // dump shm_buffer to global buffer if (threadIdx.x<block_index) { int gm_idx = threadIdx.x + offset; buffer[gm_idx] = shm_buffer[threadIdx.x]; } __syncthreads(); // 2nd phase, grid level consolidation if (threadIdx.x==0) { // count up if ( atomicInc(count, MAXDIMGRID) >= (gridDim.x-1) ) {// //printf("gridDim.x: %d buffer: %d\n", gridDim.x, *idx); #ifdef GPU_PROFILE nested_calls++; #endif dim3 dimGridB(1,1,1); if (*idx<=MAXDIMGRID) { dimGridB.x = *idx; } else if 
(*idx<=MAXDIMGRID*NESTED_BLOCK_SIZE) { dimGridB.x = MAXDIMGRID; dimGridB.y = *idx/MAXDIMGRID+1; } else { printf("Too many elements in queue\n"); } hipLaunchKernelGGL(( csr_join_block_queue_kernel), dim3(dimGridB), dim3(NESTED_BLOCK_SIZE),0,s, ptr, indices, buffer, idx, data, x, y, node_num ); } } } __global__ void gen_dual_queue_workset_kernel(int *vertexArray, char *update, int nodeNumber, int *queue_l, unsigned int *queue_length_l, unsigned int queue_max_length_l, int *queue_h, unsigned int *queue_length_h, unsigned int queue_max_length_h) { int tid = blockIdx.x *blockDim.x + threadIdx.x; if ( tid<nodeNumber && update[tid] ) { update[tid] = 0; int start = vertexArray[tid]; int end = vertexArray[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* write vertex number to LOW degree queue */ unsigned int q_idx = atomicInc(queue_length_l, queue_max_length_l); queue_l[q_idx] = tid; } else { /* write vertex number to HIGH degree queue */ unsigned int q_idx = atomicInc(queue_length_h, queue_max_length_h); queue_h[q_idx] = tid; } } } #endif
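// ---------------------------------------------------------------------------
// Editor's note (hedged): warp_reduce_sum above is written against the
// pre-CUDA-9 __shfl_down intrinsic, which this HIP port keeps. On CUDA 9+ the
// synchronizing variant with an explicit lane mask is required; a sketch of
// the modern form, assuming the full warp is active:
// ---------------------------------------------------------------------------
__inline__ __device__ float warp_reduce_sum_sync(float val) {
    for (int offset = 32 / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffffu, val, offset);  // lane i += lane i+offset
    return val;
}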
46d76ac4f6f84c4a01111bd4e232a24b001e6dae.cu
#ifndef __JOIN_KERNEL__ #define __JOIN_KERNEL__ #define MAX_LEVEL 9999 #define MAXDIMGRID 65535 #define MAXDIMBLOCK 1024 #define THREASHOLD 16 #define SHM_BUFF_SIZE 256 #define NESTED_BLOCK_SIZE 64 #define MAX_STREAM_NUM 16 #define WARP_SIZE 32 //#define GPU_PROFILE #ifdef GPU_PROFILE __device__ unsigned nested_calls = 0; __global__ void gpu_statistics(unsigned solution){ printf("====> GPU #%u - number of nested kernel calls:%u\n", solution, nested_calls); } #endif __device__ unsigned int gm_idx_pool[MAXDIMGRID*MAXDIMBLOCK/WARP_SIZE]; __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #if CUDA_VERSION <= 6000 __device__ inline double __shfl_down (double var, unsigned int src_lane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, src_lane, width); a.y = __shfl_down(a.y, src_lane, width); return *reinterpret_cast<double*>(&a); } #endif __inline__ __device__ FLOAT_T warp_reduce_sum(FLOAT_T val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2) val += __shfl_down(val, offset); return val; } __inline__ __device__ FLOAT_T block_reduce_sum(FLOAT_T val) { static __shared__ FLOAT_T shared[32]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; val = warp_reduce_sum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0; if (wid==0) val = warp_reduce_sum(val); //Final reduce within first warp return val; } __inline__ __device__ int binary_search() { return -1; } __global__ void join_bitmap_init_kernel( char *frontier, int node_num ) { /* row == tid, each thread processes one row in sparse matrix */ int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<node_num ) frontier[tid] = 1; } __global__ void join_kernel( int *ptr, int *indices, short *data, short *x, short *y, short *z, int node_num ) { /* row == tid, each thread processes one row in sparse matrix */ int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<node_num ) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; /* access neighbours */ for (int i=start; i<end; ++i) { int pos = binary_search(); if ( pos!=-1 ) { // } else { } } y[tid] = dot; } } __global__ void csr_join_thread_queue_kernel( int *ptr, int *indices, int *queue, unsigned int *queue_length, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num ) { /* row == tid, each thread processes one row in sparse matrix */ int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int frontier_size = *queue_length; if ( tid<frontier_size ) { int row = queue[tid]; FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[row]; int end = ptr[row+1]; /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[row] = dot; } } __global__ void csr_join_block_queue_kernel(int *ptr, int *indices, int *queue, unsigned int *queue_length, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num ) { int bid = blockIdx.x+blockIdx.y*gridDim.x; //*MAXDIMBLOCK + threadIdx.x; int row = 0; FLOAT_T dot = 0; unsigned int frontier_size = *queue_length; if ( bid<frontier_size ){ row = queue[bid]; // grab a work from queue, bid is queue index /* get neighbour range */ int start = ptr[row]; int end = ptr[row+1]; /* access neighbours */ for (int i=start+threadIdx.x; i<end; i+=blockDim.x) { dot += data[i] * x[indices[i]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) { y[row] = dot; } } } /* LOAD BALANCING THROUGH DELAYED BUFFER */ /* implements a delayed buffer in shared memory: - in phase 1, the threads access the nodes in the queue with a thread-based mapping (one node per thread) - in phase 2, the blocks access the nodes in the delayed-buffer in a block-based mapping (one neighbor per thread) */ __global__ void csr_join_shared_delayed_buffer_kernel( int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int t_idx = 0; // thread-based variable used to index inside the delayed buffer __shared__ int buffer[SHM_BUFF_SIZE]; //delayed buffer __shared__ unsigned int idx; //index within the delayed buffer if (threadIdx.x==0) idx = 0; __syncthreads(); // 1st phase - thread-based mapping if ( tid<node_num) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { // insert into delayed buffer t_idx = atomicInc(&idx, SHM_BUFF_SIZE); buffer[t_idx] = tid; //nested_calls++; } } __syncthreads(); // 2nd phase - each block processed all the elements in its shared memory buffer; each thread process a different neighbor #ifdef GPU_PROFILE if (tid==0 && idx!=0) { printf("In Block %d # delayed nodes : %d\n", blockIdx.x, idx); } #endif for (int i=0; i<idx; 
i++) { FLOAT_T dot = 0; int row = buffer[i]; //grab an element from the buffer // get neighbour range int start = ptr[row]; int end = ptr[row+1]; // access neighbors - one thread per neigbor; for (int eid=start+threadIdx.x; eid<end; eid+=blockDim.x){ dot += data[eid] * x[indices[eid]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) { y[row] = dot; } } } /* implements phase 1 of delayed buffer (buffer) in global memory: - in phase 1, the threads access the nodes in the queue with a thread-based mapping (one node per thread) - phase 2 must be implemented by separately invoking the "process_buffer" kernel */ __global__ void csr_join_global_delayed_buffer_kernel( int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int *buffer, unsigned int *idx, int node_num ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int t_idx = 0; // 1st phase if ( tid<node_num) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(idx, GM_BUFF_SIZE); buffer[t_idx] = tid; } } } /* LOAD BALANCING THROUGH DYNAMIC PARALLELISM */ /* Child kernel invoked by the dynamic parallelism implementation with multiple kernel calls This kernel processes the neighbors of a certain node. The starting and ending point (start and end parameters) within the edge array are given as parameter */ __global__ void join_process_neighbors( int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int start, int end, int row ) { int tid = blockIdx.x * blockDim.x + threadIdx.x + start; FLOAT_T dot = 0; if (tid < end) { dot += data[tid] * x[indices[tid]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) atomicAdd( &y[row], dot); } __global__ void csr_join_multidp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; cudaStream_t s[MAX_STREAM_NUM]; for (int i=0; i<MAX_STREAM_NUM; ++i) { cudaStreamCreateWithFlags(&s[i], cudaStreamNonBlocking); } if ( tid<node_num ) { FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num<THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { #ifdef GPU_PROFILE nested_calls++; // printf("calling nested kernel for %d neighbors\n", edgeNum); #endif join_process_neighbors<<<edge_num/NESTED_BLOCK_SIZE+1, NESTED_BLOCK_SIZE,0,s[threadIdx.x%MAX_STREAM_NUM]>>>( indices, data, x, y, start, end, tid); } } } /* processes the elements in a buffer in block-based fashion. 
The buffer stores nodes ids in a queue */ __global__ void join_process_buffer( int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer, unsigned int buffer_size) { int bid = blockIdx.x; FLOAT_T dot = 0; if ( bid<buffer_size ) { // block-based mapping int row = buffer[bid]; //nodes processed by current block /* get neighbour range */ int start = ptr[row]; int end = ptr[row+1]; /* access neighbours */ for (int eid=start+threadIdx.x; eid<end; eid+=blockDim.x) { // eid is the identifier of the edge processed by the current thread dot += data[eid] * x[indices[eid]]; } __syncthreads(); dot = block_reduce_sum( dot ); if ( threadIdx.x==0 ) { y[row] = dot; } } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_singledp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer) { cudaStream_t s; cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking); unsigned per_block_buffer = GM_BUFF_SIZE/gridDim.x; // amount of the buffer available to each thread block unsigned block_offset = blockIdx.x * per_block_buffer; // block offset within the buffer unsigned int *block_index = &gm_idx_pool[blockIdx.x]; // index of each block within its sub-buffer int t_idx = 0; // used to access the buffer if (threadIdx.x == 0) *block_index = 0; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(block_index, per_block_buffer); buffer[t_idx+block_offset] = tid; } } __syncthreads(); //2nd phase - nested kernel call if (threadIdx.x==0 && *block_index!=0){ #ifdef GPU_PROFILE nested_calls++; #endif join_process_buffer<<<*block_index,NESTED_BLOCK_SIZE,0,s>>>( ptr, indices, data, x, y, node_num, buffer+block_offset, *block_index); } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_warp_dp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer) { cudaStream_t s[MAX_STREAM_NUM]; for (int i=0; i<MAX_STREAM_NUM; ++i) { cudaStreamCreateWithFlags(&s[i], cudaStreamNonBlocking); } int warpId = threadIdx.x / WARP_SIZE; int warpDim = blockDim.x / WARP_SIZE; int total_warp_num = gridDim.x * warpDim; unsigned per_warp_buffer = GM_BUFF_SIZE/total_warp_num; // amount of the buffer available to each thread block unsigned warp_offset = (blockIdx.x * warpDim + warpId) * per_warp_buffer; // block offset within the buffer unsigned int *warp_index = &gm_idx_pool[blockIdx.x * warpDim + warpId]; // index of each block within its sub-buffer int t_idx = 0; // used to access the buffer *warp_index = 0; int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(warp_index, per_warp_buffer); buffer[t_idx+warp_offset] = tid; } } __syncthreads(); //2nd phase - nested kernel call if (threadIdx.x%WARP_SIZE==0 && *warp_index!=0){ #ifdef GPU_PROFILE nested_calls++; #endif 
join_process_buffer<<<*warp_index,NESTED_BLOCK_SIZE,0,s[threadIdx.x%MAX_STREAM_NUM]>>>( ptr, indices, data, x, y, node_num, buffer+warp_offset, *warp_index); } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_block_dp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer) { cudaStream_t s; cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking); unsigned per_block_buffer = GM_BUFF_SIZE/gridDim.x; // amount of the buffer available to each thread block unsigned block_offset = blockIdx.x * per_block_buffer; // block offset within the buffer __shared__ int shm_buffer[MAXDIMBLOCK]; unsigned int *block_index = &gm_idx_pool[blockIdx.x]; // index of each block within its sub-buffer int t_idx = 0; // used to access the buffer if (threadIdx.x == 0) *block_index = 0; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(block_index, per_block_buffer); shm_buffer[t_idx] = tid; } } __syncthreads(); // dump shm_buffer to global buffer if (threadIdx.x<*block_index) { int idx = threadIdx.x + block_offset; buffer[idx] = shm_buffer[threadIdx.x]; } __syncthreads(); //2nd phase - nested kernel call if (threadIdx.x==0 && *block_index!=0){ #ifdef GPU_PROFILE nested_calls++; #endif join_process_buffer<<<*block_index,NESTED_BLOCK_SIZE,0,s>>>( ptr, indices, data, x, y, node_num, buffer+block_offset, *block_index); } } /* thread queue with dynamic parallelism and a single nested kernel call per thread-block*/ __global__ void csr_join_grid_dp_kernel(int *ptr, int *indices, FLOAT_T *data, FLOAT_T *x, FLOAT_T *y, int node_num, int *buffer, unsigned int *idx, unsigned int *count) { cudaStream_t s; cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking); unsigned per_block_buffer = GM_BUFF_SIZE/gridDim.x; // amount of the buffer available to each thread block __shared__ int shm_buffer[MAXDIMBLOCK]; __shared__ unsigned int block_index; // index of each block within its sub-buffer __shared__ int offset; int t_idx = 0; // used to access the buffer if (threadIdx.x == 0) block_index = 0; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; // 1st phase if ( tid<node_num){ FLOAT_T dot = 0; /* get neighbour range */ int start = ptr[tid]; int end = ptr[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* access neighbours */ for (int i=start; i<end; ++i) { dot += data[i] * x[indices[i]]; } y[tid] = dot; } else { t_idx = atomicInc(&block_index, per_block_buffer); shm_buffer[t_idx] = tid; } } __syncthreads(); // reorganize consolidation buffer for load balance (get offset per block) if (threadIdx.x==0) { offset = atomicAdd(idx, block_index); } __syncthreads(); // dump shm_buffer to global buffer if (threadIdx.x<block_index) { int gm_idx = threadIdx.x + offset; buffer[gm_idx] = shm_buffer[threadIdx.x]; } __syncthreads(); // 2nd phase, grid level consolidation if (threadIdx.x==0) { // count up if ( atomicInc(count, MAXDIMGRID) >= (gridDim.x-1) ) {// //printf("gridDim.x: %d buffer: %d\n", gridDim.x, *idx); #ifdef GPU_PROFILE nested_calls++; #endif dim3 dimGridB(1,1,1); if (*idx<=MAXDIMGRID) { dimGridB.x = *idx; } else if (*idx<=MAXDIMGRID*NESTED_BLOCK_SIZE) { dimGridB.x = MAXDIMGRID; 
dimGridB.y = *idx/MAXDIMGRID+1; } else { printf("Too many elements in queue\n"); } csr_join_block_queue_kernel<<<dimGridB, NESTED_BLOCK_SIZE,0,s>>>(ptr, indices, buffer, idx, data, x, y, node_num ); } } } __global__ void gen_dual_queue_workset_kernel(int *vertexArray, char *update, int nodeNumber, int *queue_l, unsigned int *queue_length_l, unsigned int queue_max_length_l, int *queue_h, unsigned int *queue_length_h, unsigned int queue_max_length_h) { int tid = blockIdx.x *blockDim.x + threadIdx.x; if ( tid<nodeNumber && update[tid] ) { update[tid] = 0; int start = vertexArray[tid]; int end = vertexArray[tid+1]; int edge_num = end - start; if ( edge_num < THREASHOLD ) { /* write vertex number to LOW degree queue */ unsigned int q_idx = atomicInc(queue_length_l, queue_max_length_l); queue_l[q_idx] = tid; } else { /* write vertex number to HIGH degree queue */ unsigned int q_idx = atomicInc(queue_length_h, queue_max_length_h); queue_h[q_idx] = tid; } } } #endif
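// ---------------------------------------------------------------------------
// Editor's sketch (reference code, not part of the kernels above): every
// load-balancing variant in this file computes the same CSR sparse
// matrix-vector product y = A*x, row by row. A plain host version is handy
// for validating them; FLOAT_T is assumed to come from the project's config.h.
// ---------------------------------------------------------------------------
static void csr_spmv_host(const int* ptr, const int* indices,
                          const FLOAT_T* data, const FLOAT_T* x,
                          FLOAT_T* y, int node_num) {
    for (int row = 0; row < node_num; ++row) {
        FLOAT_T dot = 0;
        for (int i = ptr[row]; i < ptr[row + 1]; ++i)
            dot += data[i] * x[indices[i]];   // accumulate the row dot product
        y[row] = dot;
    }
}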
62c3d168b9cba7fa462dc368a2c78e72263960bd.hip
// !!! This is a file automatically generated by hipify!!! #include "DemoSingleGPU.h" #include "DeviceBuffer.h" #include "CudaExecConfig.h" #include "CudaUtilities.h" #include <vector> #include <memory> #include "HeatUtilities.h" #include "Mask.h" #include "StencilHeat.h" #include "HeatDemoDefs.h" #include "GPUs.h" #include "AsyncScheduler.h" #include "CudaPartition.h" #include <map> #include "MaskWithOffset.h" #include "StencilHeatWithOffset.h" #include "SequentialScheduler.h" #include "HeatDemoDefs.h" #include "ConstantHeatSource.h" #include "FileUtilities.h" #undef DEBUG_ASYNC //#define DEBUG_ASYNC using namespace std; using dev_buf_t = DeviceBuffer<float>; using dev_bufs_t = vector<dev_buf_t*>; using host_buf_t = UnpinnedBuffer<float>; using image_t = DeviceBuffer<uchar4>; const int width = default_width; const int height = default_height; #ifdef _DEBUG const string filename = "CUDA_demo_multi_DEBUG.bmp"; #else const string filename = "CUDA_demo_multi.bmp"; #endif // for (auto& p : parts) { // init(p); // } inline void do_something_with_the_data(HostBuffer<float>& h) { } host_buf_t* create_heat(const Extent2& ext) { host_buf_t* heat_host = new host_buf_t(ext); heat_host->alloc(); ConstantHeatSource c; c.generate(heat_host->get_ptr(), ext, num_heat_sources); return heat_host; } dev_buf_t* create_and_alloc(const Extent2& ext) { dev_buf_t* d = new DeviceBuffer<float>(ext); d->alloc(); return d; } #if defined(WINDOWS) void init(CudaPartition& partition); // forward decl void update(CudaPartition& partition); // forward decl void copy_back(CudaPartition& partition, HostBuffer<float>& host); // forward decl //IScheduler<CudaPartition>* scheduler = new SequentialScheduler<CudaPartition>(); IScheduler<CudaPartition>* scheduler = new AsyncScheduler<CudaPartition>(); void parallel_for(vector<CudaPartition> ps, function<void(CudaPartition&)> closure) { scheduler->sync(ps, closure); } // -------------------------------------------------- vector<CudaPartition> parts; // Für jede GPU eine Partition map<GPU, dev_bufs_t> dev_bufs; // Für jede GPU zwei Device-Buffer host_buf_t* heat_host; // Die Wärmequellen auf dem Host map<GPU, dev_buf_t*> heat_bufs; // Für jede GPU eine Teilkopie der Wärmequellen int current = 0; // Der aktuelle Buffer 0 oder 1 void demo_multi() { GPUs gpus; // Die GPUs Extent2 ext(width, height); // Der globale Extent parts = calc_partitions(gpus, ext); // Init Partitionen heat_host = create_heat(ext); // Init Wärmequellen parallel_for(parts, ::init); // Init alle GPUs current = 1; for (int i = 0; i < iterations; i++) // Loop { parallel_for(parts, ::update); current = !current; } UnpinnedBuffer<float> host(ext); host.alloc(); // Kopiere D2H parallel_for(parts, [&host](CudaPartition& p) { copy_back(p, host); }); do_something_with_the_data(host); // do something with the data save_bmp(filename, host, max_heat); // Clean up for (auto& partition : parts) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [cleanup]"); for (auto& buf : dev_bufs[gpu]) { buf->free(); } heat_bufs[gpu]->free(); } dev_bufs.clear(); heat_bufs.clear(); heat_host->free(); delete heat_host; CUDA::reset_all(); } void init(CudaPartition& partition) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [init]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); Region2 outer = x.get_outer(); Extent2 outer_extent = x.get_outer().get_extent(); dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); 
dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); hipMemset(dev_bufs[gpu][0]->get_ptr(), 0, dev_bufs[gpu][0]->get_size_in_bytes()); CUDA::check("hipMemset [init]"); // create local heat buffer and copy data from host dev_buf_t* loc = create_and_alloc(outer_extent); heat_bufs[gpu] = loc; float* src = &heat_host->get_ptr()[glbl.index(outer.get_offset())]; hipMemcpy(loc->get_ptr(), src, loc->get_size_in_bytes(), hipMemcpyHostToDevice); CUDA::check("hipMemcpy [init]"); } void update(CudaPartition& partition) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [update]"); float* src = dev_bufs[gpu][!current]->get_ptr(); float* dest = dev_bufs[gpu][current]->get_ptr(); XExtent2& x = partition.data.xext; Extent2 outer_extent = x.get_outer().get_extent(); Extent2 inner_extent = x.get_inner().get_extent(); Pos2 offset = x.get_offset_in_local(); // add_heat() mask_2d(outer_extent, *heat_bufs[gpu], *dev_bufs[gpu][!current], Pos2{ 0, 0 }); #ifdef _DEBUG_KERNELS hipDeviceSynchronize(); CUDA::check("mask_2d"); #endif // stencil() CudaExecConfig cfg_without(inner_extent); stencil_heat_2d(cfg_without, *dev_bufs[gpu][!current], *dev_bufs[gpu][current], inner_extent, offset, ct); #ifdef _DEBUG_KERNELS hipDeviceSynchronize(); CUDA::check("stencil_heat_2d"); #endif // Copy halo / ghost cells to other devices const int id = partition.partition_id; const int src_dev = partition.framework.gpu.get_device_id(); float* src_base = dev_bufs[gpu][current]->get_ptr(); if (x.has_high_overlap()) // && p < num_parts - 1) { // copy "high inner" to "low overlap", see slides of talk // ... from this GPU const int hii_idx = x.get_high_inner_start_index(); float* src = &src_base[hii_idx]; Region2 hii = x.get_high_inner(); const int sz = hii.get_extent().get_number_of_elems() * sizeof(float); // ... to next GPU CudaPartition dst_part = parts[id + 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); float* dst = dev_bufs[dst_gpu][current]->get_ptr(); // low overlap = (0,0) #ifdef DEBUG_ASYNC hipMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("hipMemcpyPeer[Async] ->"); #else hipMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } if (x.has_low_overlap()) { // copy "low inner" to "high overlap", see slides of talk // ... from this GPU const int loi_idx = x.get_low_inner_start_index(); float* src = &src_base[loi_idx]; Region2 loi = x.get_low_inner(); const int sz = loi.get_extent().get_number_of_elems() * sizeof(float); // ... to prev GPU CudaPartition dst_part = parts[id - 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); XExtent2& dst_x = dst_part.data.xext; // Wichtig: XExtent des Ziels! 
const int hio_idx = dst_x.get_high_overlap_start_index(); float* dst_base = dev_bufs[dst_gpu][current]->get_ptr(); float* dst = &dst_base[hio_idx]; #ifdef DEBUG_ASYNC hipMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("hipMemcpyPeer[Async] <-"); #else hipMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } } void copy_back(CudaPartition& partition, HostBuffer<float>& host) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [copy_back]"); hipDeviceSynchronize(); // Warte auf Beendigung CUDA::check("hipDeviceSynchronize [copy_back]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); // Source dev_buf_t* src_buf = dev_bufs[gpu][!current]; const int src_idx = glbl.index(x.get_offset_in_local()); float* src = &(src_buf->get_ptr()[src_idx]); // Destination const Pos2 offset = x.get_inner_offset(); const int dst_idx = glbl.index(offset); float* dst = &(host.get_ptr()[dst_idx]); const size_t sz = x.get_inner().get_extent().get_number_of_elems() * sizeof(float); hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost); CUDA::check("hipMemcpy"); } #endif // --------------------------------------------------------------------- #if defined(MAC) || defined(LINUX) void init(CudaPartition partition); // forward decl void update(CudaPartition partition); // forward decl void copy_back(CudaPartition partition, HostBuffer<float>& host); // forward decl #if defined(MAC) IScheduler<CudaPartition>* scheduler = new SequentialScheduler<CudaPartition>(); #else IScheduler<CudaPartition>* scheduler = new AsyncScheduler<CudaPartition>(); #endif void parallel_for(vector<CudaPartition> ps, function<void(CudaPartition)> closure) { scheduler->sync(ps, closure); } // -------------------------------------------------- vector<CudaPartition> parts; // Für jede GPU eine Partition map<GPU, dev_bufs_t> dev_bufs; // Für jede GPU zwei Device-Buffer host_buf_t* heat_host; // Die Wärmequellen auf dem Host map<GPU, dev_buf_t*> heat_bufs; // Für jede GPU eine Teilkopie der Wärmequellen int current = 0; // Der aktuelle Buffer 0 oder 1 void demo_multi() { GPUs gpus; // Die GPUs Extent2 ext(width, height); // Der globale Extent parts = calc_partitions(gpus, ext); // Init Partitionen heat_host = create_heat(ext); // Init Wärmequellen parallel_for(parts, ::init); // Init alle GPUs current = 1; for (int i = 0; i < iterations; i++) // Loop { parallel_for(parts, ::update); current = !current; } UnpinnedBuffer<float> host(ext); host.alloc(); // Kopiere D2H parallel_for(parts, [&host](CudaPartition p) { copy_back(p, host); }); do_something_with_the_data(host); // do something with the data save_bmp(filename, host, max_heat); // Clean up for (auto& partition : parts) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [cleanup]"); for (auto& buf : dev_bufs[gpu]) { buf->free(); } heat_bufs[gpu]->free(); } dev_bufs.clear(); heat_bufs.clear(); heat_host->free(); delete heat_host; CUDA::reset_all(); } void init(CudaPartition partition) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [init]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); Region2 outer = x.get_outer(); Extent2 outer_extent = x.get_outer().get_extent(); dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); hipMemset(dev_bufs[gpu][0]->get_ptr(), 0, dev_bufs[gpu][0]->get_size_in_bytes()); CUDA::check("hipMemset [init]"); // create local 
heat buffer and copy data from host dev_buf_t* loc = create_and_alloc(outer_extent); heat_bufs[gpu] = loc; float* src = &heat_host->get_ptr()[glbl.index(outer.get_offset())]; hipMemcpy(loc->get_ptr(), src, loc->get_size_in_bytes(), hipMemcpyHostToDevice); CUDA::check("hipMemcpy [init]"); } void update(CudaPartition partition) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [update]"); float* src = dev_bufs[gpu][!current]->get_ptr(); float* dest = dev_bufs[gpu][current]->get_ptr(); XExtent2& x = partition.data.xext; Extent2 outer_extent = x.get_outer().get_extent(); Extent2 inner_extent = x.get_inner().get_extent(); Pos2 offset = x.get_offset_in_local(); // add_heat() mask_2d(outer_extent, *heat_bufs[gpu], *dev_bufs[gpu][!current], Pos2{ 0, 0 }); #ifdef _DEBUG_KERNELS hipDeviceSynchronize(); CUDA::check("mask_2d"); #endif // stencil() CudaExecConfig cfg_without(inner_extent); stencil_heat_2d(cfg_without, *dev_bufs[gpu][!current], *dev_bufs[gpu][current], inner_extent, offset, ct); #ifdef _DEBUG_KERNELS hipDeviceSynchronize(); CUDA::check("stencil_heat_2d"); #endif // Copy halo / ghost cells to other devices const int id = partition.partition_id; const int src_dev = partition.framework.gpu.get_device_id(); float* src_base = dev_bufs[gpu][current]->get_ptr(); if (x.has_high_overlap()) // && p < num_parts - 1) { // copy "high inner" to "low overlap", see slides of talk // ... from this GPU const int hii_idx = x.get_high_inner_start_index(); float* src = &src_base[hii_idx]; Region2 hii = x.get_high_inner(); const int sz = hii.get_extent().get_number_of_elems() * sizeof(float); // ... to next GPU CudaPartition dst_part = parts[id + 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); float* dst = dev_bufs[dst_gpu][current]->get_ptr(); // low overlap = (0,0) #ifdef DEBUG_ASYNC hipMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("hipMemcpyPeer[Async] ->"); #else hipMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } if (x.has_low_overlap()) { // copy "low inner" to "high overlap", see slides of talk // ... from this GPU const int loi_idx = x.get_low_inner_start_index(); float* src = &src_base[loi_idx]; Region2 loi = x.get_low_inner(); const int sz = loi.get_extent().get_number_of_elems() * sizeof(float); // ... to prev GPU CudaPartition dst_part = parts[id - 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); XExtent2& dst_x = dst_part.data.xext; // Wichtig: XExtent des Ziels! 
const int hio_idx = dst_x.get_high_overlap_start_index(); float* dst_base = dev_bufs[dst_gpu][current]->get_ptr(); float* dst = &dst_base[hio_idx]; #ifdef DEBUG_ASYNC hipMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("hipMemcpyPeer[Async] <-"); #else hipMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } } void copy_back(CudaPartition partition, HostBuffer<float>& host) { GPU gpu = partition.framework.gpu; hipSetDevice(gpu.get_device_id()); CUDA::check("hipSetDevice [copy_back]"); hipDeviceSynchronize(); // Warte auf Beendigung CUDA::check("hipDeviceSynchronize [copy_back]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); // Source dev_buf_t* src_buf = dev_bufs[gpu][!current]; const int src_idx = glbl.index(x.get_offset_in_local()); float* src = &(src_buf->get_ptr()[src_idx]); // Destination const Pos2 offset = x.get_inner_offset(); const int dst_idx = glbl.index(offset); float* dst = &(host.get_ptr()[dst_idx]); const size_t sz = x.get_inner().get_extent().get_number_of_elems() * sizeof(float); hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost); CUDA::check("hipMemcpy"); } #endif #undef FOLIE //#define FOLIE #ifdef FOLIE void x() { using dev_buf_t = DeviceBuffer<float>; using dev_bufs_t = vector<dev_buf_t*>; using host_buf_t = UnpinnedBuffer<float>; vector<CudaPartition> parts; // Für jede GPU eine Partition map<GPU, dev_bufs_t> dev_bufs; // Für jede GPU zwei Device-Buffer host_buf_t* heat_host; // Die Wärmequellen auf dem Host map<GPU, dev_buf_t*> heat_bufs; // Für jede GPU Teil der Wärmequellen int current = 0; // Der aktuelle Buffer 0 oder 1 GPUs gpus; // Die GPUs Extent2 ext(width, height); // Der globale Extent parts = calc_partitions(gpus, ext); // Init Partitionen heat_host = create_heat(ext); // Init Wärmequellen parallel_for(parts, init); // Init alle GPUs current = 1; for (int i = 0; i < iterations; i++) // Loop { parallel_for(parts, ::update); current = !current; } UnpinnedBuffer<float> host(ext); host.alloc(); // Kopiere D2H parallel_for(parts, [&host](CudaPartition& p) { ::copy_back(p, host); }); do_something_with_the_data(host); // do something with the data } #endif
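Each update() above reads dev_bufs[gpu][!current] and writes dev_bufs[gpu][current], and demo_multi() flips current after every parallel_for, so the two device buffers ping-pong between source and destination roles. stencil_heat_2d itself lives in StencilHeatWithOffset.h and is not shown in this file; assuming it implements the usual explicit 5-point heat step with conduction coefficient ct, one iteration computes

u_{i,j}^{t+1} = u_{i,j}^{t} + c_t \left( u_{i-1,j}^{t} + u_{i+1,j}^{t} + u_{i,j-1}^{t} + u_{i,j+1}^{t} - 4\, u_{i,j}^{t} \right),

evaluated over each partition's inner extent only, while the overlap (halo) regions copied from neighboring GPUs supply the u values across partition boundaries.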
62c3d168b9cba7fa462dc368a2c78e72263960bd.cu
#include "DemoSingleGPU.h" #include "DeviceBuffer.h" #include "CudaExecConfig.h" #include "CudaUtilities.h" #include <vector> #include <memory> #include "HeatUtilities.h" #include "Mask.h" #include "StencilHeat.h" #include "HeatDemoDefs.h" #include "GPUs.h" #include "AsyncScheduler.h" #include "CudaPartition.h" #include <map> #include "MaskWithOffset.h" #include "StencilHeatWithOffset.h" #include "SequentialScheduler.h" #include "HeatDemoDefs.h" #include "ConstantHeatSource.h" #include "FileUtilities.h" #undef DEBUG_ASYNC //#define DEBUG_ASYNC using namespace std; using dev_buf_t = DeviceBuffer<float>; using dev_bufs_t = vector<dev_buf_t*>; using host_buf_t = UnpinnedBuffer<float>; using image_t = DeviceBuffer<uchar4>; const int width = default_width; const int height = default_height; #ifdef _DEBUG const string filename = "CUDA_demo_multi_DEBUG.bmp"; #else const string filename = "CUDA_demo_multi.bmp"; #endif // for (auto& p : parts) { // init(p); // } inline void do_something_with_the_data(HostBuffer<float>& h) { } host_buf_t* create_heat(const Extent2& ext) { host_buf_t* heat_host = new host_buf_t(ext); heat_host->alloc(); ConstantHeatSource c; c.generate(heat_host->get_ptr(), ext, num_heat_sources); return heat_host; } dev_buf_t* create_and_alloc(const Extent2& ext) { dev_buf_t* d = new DeviceBuffer<float>(ext); d->alloc(); return d; } #if defined(WINDOWS) void init(CudaPartition& partition); // forward decl void update(CudaPartition& partition); // forward decl void copy_back(CudaPartition& partition, HostBuffer<float>& host); // forward decl //IScheduler<CudaPartition>* scheduler = new SequentialScheduler<CudaPartition>(); IScheduler<CudaPartition>* scheduler = new AsyncScheduler<CudaPartition>(); void parallel_for(vector<CudaPartition> ps, function<void(CudaPartition&)> closure) { scheduler->sync(ps, closure); } // -------------------------------------------------- vector<CudaPartition> parts; // Für jede GPU eine Partition map<GPU, dev_bufs_t> dev_bufs; // Für jede GPU zwei Device-Buffer host_buf_t* heat_host; // Die Wärmequellen auf dem Host map<GPU, dev_buf_t*> heat_bufs; // Für jede GPU eine Teilkopie der Wärmequellen int current = 0; // Der aktuelle Buffer 0 oder 1 void demo_multi() { GPUs gpus; // Die GPUs Extent2 ext(width, height); // Der globale Extent parts = calc_partitions(gpus, ext); // Init Partitionen heat_host = create_heat(ext); // Init Wärmequellen parallel_for(parts, ::init); // Init alle GPUs current = 1; for (int i = 0; i < iterations; i++) // Loop { parallel_for(parts, ::update); current = !current; } UnpinnedBuffer<float> host(ext); host.alloc(); // Kopiere D2H parallel_for(parts, [&host](CudaPartition& p) { copy_back(p, host); }); do_something_with_the_data(host); // do something with the data save_bmp(filename, host, max_heat); // Clean up for (auto& partition : parts) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [cleanup]"); for (auto& buf : dev_bufs[gpu]) { buf->free(); } heat_bufs[gpu]->free(); } dev_bufs.clear(); heat_bufs.clear(); heat_host->free(); delete heat_host; CUDA::reset_all(); } void init(CudaPartition& partition) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [init]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); Region2 outer = x.get_outer(); Extent2 outer_extent = x.get_outer().get_extent(); dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); 
cudaMemset(dev_bufs[gpu][0]->get_ptr(), 0, dev_bufs[gpu][0]->get_size_in_bytes()); CUDA::check("cudaMemset [init]"); // create local heat buffer and copy data from host dev_buf_t* loc = create_and_alloc(outer_extent); heat_bufs[gpu] = loc; float* src = &heat_host->get_ptr()[glbl.index(outer.get_offset())]; cudaMemcpy(loc->get_ptr(), src, loc->get_size_in_bytes(), cudaMemcpyHostToDevice); CUDA::check("cudaMemcpy [init]"); } void update(CudaPartition& partition) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [update]"); float* src = dev_bufs[gpu][!current]->get_ptr(); float* dest = dev_bufs[gpu][current]->get_ptr(); XExtent2& x = partition.data.xext; Extent2 outer_extent = x.get_outer().get_extent(); Extent2 inner_extent = x.get_inner().get_extent(); Pos2 offset = x.get_offset_in_local(); // add_heat() mask_2d(outer_extent, *heat_bufs[gpu], *dev_bufs[gpu][!current], Pos2{ 0, 0 }); #ifdef _DEBUG_KERNELS cudaDeviceSynchronize(); CUDA::check("mask_2d"); #endif // stencil() CudaExecConfig cfg_without(inner_extent); stencil_heat_2d(cfg_without, *dev_bufs[gpu][!current], *dev_bufs[gpu][current], inner_extent, offset, ct); #ifdef _DEBUG_KERNELS cudaDeviceSynchronize(); CUDA::check("stencil_heat_2d"); #endif // Copy halo / ghost cells to other devices const int id = partition.partition_id; const int src_dev = partition.framework.gpu.get_device_id(); float* src_base = dev_bufs[gpu][current]->get_ptr(); if (x.has_high_overlap()) // && p < num_parts - 1) { // copy "high inner" to "low overlap", see slides of talk // ... from this GPU const int hii_idx = x.get_high_inner_start_index(); float* src = &src_base[hii_idx]; Region2 hii = x.get_high_inner(); const int sz = hii.get_extent().get_number_of_elems() * sizeof(float); // ... to next GPU CudaPartition dst_part = parts[id + 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); float* dst = dev_bufs[dst_gpu][current]->get_ptr(); // low overlap = (0,0) #ifdef DEBUG_ASYNC cudaMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("cudaMemcpyPeer[Async] ->"); #else cudaMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } if (x.has_low_overlap()) { // copy "low inner" to "high overlap", see slides of talk // ... from this GPU const int loi_idx = x.get_low_inner_start_index(); float* src = &src_base[loi_idx]; Region2 loi = x.get_low_inner(); const int sz = loi.get_extent().get_number_of_elems() * sizeof(float); // ... to prev GPU CudaPartition dst_part = parts[id - 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); XExtent2& dst_x = dst_part.data.xext; // Wichtig: XExtent des Ziels! 
const int hio_idx = dst_x.get_high_overlap_start_index(); float* dst_base = dev_bufs[dst_gpu][current]->get_ptr(); float* dst = &dst_base[hio_idx]; #ifdef DEBUG_ASYNC cudaMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("cudaMemcpyPeer[Async] <-"); #else cudaMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } } void copy_back(CudaPartition& partition, HostBuffer<float>& host) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [copy_back]"); cudaDeviceSynchronize(); // Warte auf Beendigung CUDA::check("cudaDeviceSynchronize [copy_back]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); // Source dev_buf_t* src_buf = dev_bufs[gpu][!current]; const int src_idx = glbl.index(x.get_offset_in_local()); float* src = &(src_buf->get_ptr()[src_idx]); // Destination const Pos2 offset = x.get_inner_offset(); const int dst_idx = glbl.index(offset); float* dst = &(host.get_ptr()[dst_idx]); const size_t sz = x.get_inner().get_extent().get_number_of_elems() * sizeof(float); cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost); CUDA::check("cudaMemcpy"); } #endif // --------------------------------------------------------------------- #if defined(MAC) || defined(LINUX) void init(CudaPartition partition); // forward decl void update(CudaPartition partition); // forward decl void copy_back(CudaPartition partition, HostBuffer<float>& host); // forward decl #if defined(MAC) IScheduler<CudaPartition>* scheduler = new SequentialScheduler<CudaPartition>(); #else IScheduler<CudaPartition>* scheduler = new AsyncScheduler<CudaPartition>(); #endif void parallel_for(vector<CudaPartition> ps, function<void(CudaPartition)> closure) { scheduler->sync(ps, closure); } // -------------------------------------------------- vector<CudaPartition> parts; // Für jede GPU eine Partition map<GPU, dev_bufs_t> dev_bufs; // Für jede GPU zwei Device-Buffer host_buf_t* heat_host; // Die Wärmequellen auf dem Host map<GPU, dev_buf_t*> heat_bufs; // Für jede GPU eine Teilkopie der Wärmequellen int current = 0; // Der aktuelle Buffer 0 oder 1 void demo_multi() { GPUs gpus; // Die GPUs Extent2 ext(width, height); // Der globale Extent parts = calc_partitions(gpus, ext); // Init Partitionen heat_host = create_heat(ext); // Init Wärmequellen parallel_for(parts, ::init); // Init alle GPUs current = 1; for (int i = 0; i < iterations; i++) // Loop { parallel_for(parts, ::update); current = !current; } UnpinnedBuffer<float> host(ext); host.alloc(); // Kopiere D2H parallel_for(parts, [&host](CudaPartition p) { copy_back(p, host); }); do_something_with_the_data(host); // do something with the data save_bmp(filename, host, max_heat); // Clean up for (auto& partition : parts) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [cleanup]"); for (auto& buf : dev_bufs[gpu]) { buf->free(); } heat_bufs[gpu]->free(); } dev_bufs.clear(); heat_bufs.clear(); heat_host->free(); delete heat_host; CUDA::reset_all(); } void init(CudaPartition partition) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [init]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); Region2 outer = x.get_outer(); Extent2 outer_extent = x.get_outer().get_extent(); dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); dev_bufs[gpu].push_back(create_and_alloc(outer_extent)); cudaMemset(dev_bufs[gpu][0]->get_ptr(), 0, dev_bufs[gpu][0]->get_size_in_bytes()); CUDA::check("cudaMemset 
[init]"); // create local heat buffer and copy data from host dev_buf_t* loc = create_and_alloc(outer_extent); heat_bufs[gpu] = loc; float* src = &heat_host->get_ptr()[glbl.index(outer.get_offset())]; cudaMemcpy(loc->get_ptr(), src, loc->get_size_in_bytes(), cudaMemcpyHostToDevice); CUDA::check("cudaMemcpy [init]"); } void update(CudaPartition partition) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [update]"); float* src = dev_bufs[gpu][!current]->get_ptr(); float* dest = dev_bufs[gpu][current]->get_ptr(); XExtent2& x = partition.data.xext; Extent2 outer_extent = x.get_outer().get_extent(); Extent2 inner_extent = x.get_inner().get_extent(); Pos2 offset = x.get_offset_in_local(); // add_heat() mask_2d(outer_extent, *heat_bufs[gpu], *dev_bufs[gpu][!current], Pos2{ 0, 0 }); #ifdef _DEBUG_KERNELS cudaDeviceSynchronize(); CUDA::check("mask_2d"); #endif // stencil() CudaExecConfig cfg_without(inner_extent); stencil_heat_2d(cfg_without, *dev_bufs[gpu][!current], *dev_bufs[gpu][current], inner_extent, offset, ct); #ifdef _DEBUG_KERNELS cudaDeviceSynchronize(); CUDA::check("stencil_heat_2d"); #endif // Copy halo / ghost cells to other devices const int id = partition.partition_id; const int src_dev = partition.framework.gpu.get_device_id(); float* src_base = dev_bufs[gpu][current]->get_ptr(); if (x.has_high_overlap()) // && p < num_parts - 1) { // copy "high inner" to "low overlap", see slides of talk // ... from this GPU const int hii_idx = x.get_high_inner_start_index(); float* src = &src_base[hii_idx]; Region2 hii = x.get_high_inner(); const int sz = hii.get_extent().get_number_of_elems() * sizeof(float); // ... to next GPU CudaPartition dst_part = parts[id + 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); float* dst = dev_bufs[dst_gpu][current]->get_ptr(); // low overlap = (0,0) #ifdef DEBUG_ASYNC cudaMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("cudaMemcpyPeer[Async] ->"); #else cudaMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } if (x.has_low_overlap()) { // copy "low inner" to "high overlap", see slides of talk // ... from this GPU const int loi_idx = x.get_low_inner_start_index(); float* src = &src_base[loi_idx]; Region2 loi = x.get_low_inner(); const int sz = loi.get_extent().get_number_of_elems() * sizeof(float); // ... to prev GPU CudaPartition dst_part = parts[id - 1]; GPU dst_gpu = dst_part.framework.gpu; const int dst_dev = dst_gpu.get_device_id(); XExtent2& dst_x = dst_part.data.xext; // Wichtig: XExtent des Ziels! 
const int hio_idx = dst_x.get_high_overlap_start_index(); float* dst_base = dev_bufs[dst_gpu][current]->get_ptr(); float* dst = &dst_base[hio_idx]; #ifdef DEBUG_ASYNC cudaMemcpyPeer(dst, dst_dev, src, src_dev, sz); CUDA::check("cudaMemcpyPeer[Async] <-"); #else cudaMemcpyPeerAsync(dst, dst_dev, src, src_dev, sz); #endif } } void copy_back(CudaPartition partition, HostBuffer<float>& host) { GPU gpu = partition.framework.gpu; cudaSetDevice(gpu.get_device_id()); CUDA::check("cudaSetDevice [copy_back]"); cudaDeviceSynchronize(); // Warte auf Beendigung CUDA::check("cudaDeviceSynchronize [copy_back]"); XExtent2& x = partition.data.xext; Extent2 glbl = x.get_global_extent(); // Source dev_buf_t* src_buf = dev_bufs[gpu][!current]; const int src_idx = glbl.index(x.get_offset_in_local()); float* src = &(src_buf->get_ptr()[src_idx]); // Destination const Pos2 offset = x.get_inner_offset(); const int dst_idx = glbl.index(offset); float* dst = &(host.get_ptr()[dst_idx]); const size_t sz = x.get_inner().get_extent().get_number_of_elems() * sizeof(float); cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost); CUDA::check("cudaMemcpy"); } #endif #undef FOLIE //#define FOLIE #ifdef FOLIE void x() { using dev_buf_t = DeviceBuffer<float>; using dev_bufs_t = vector<dev_buf_t*>; using host_buf_t = UnpinnedBuffer<float>; vector<CudaPartition> parts; // Für jede GPU eine Partition map<GPU, dev_bufs_t> dev_bufs; // Für jede GPU zwei Device-Buffer host_buf_t* heat_host; // Die Wärmequellen auf dem Host map<GPU, dev_buf_t*> heat_bufs; // Für jede GPU Teil der Wärmequellen int current = 0; // Der aktuelle Buffer 0 oder 1 GPUs gpus; // Die GPUs Extent2 ext(width, height); // Der globale Extent parts = calc_partitions(gpus, ext); // Init Partitionen heat_host = create_heat(ext); // Init Wärmequellen parallel_for(parts, init); // Init alle GPUs current = 1; for (int i = 0; i < iterations; i++) // Loop { parallel_for(parts, ::update); current = !current; } UnpinnedBuffer<float> host(ext); host.alloc(); // Kopiere D2H parallel_for(parts, [&host](CudaPartition& p) { ::copy_back(p, host); }); do_something_with_the_data(host); // do something with the data } #endif
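update() pushes the halo rows to neighboring devices with cudaMemcpyPeerAsync/hipMemcpyPeerAsync and never enables peer access explicitly; that works because the runtime transparently stages the copy through host memory when direct peer access is unavailable. A minimal standalone sketch of that transfer path (illustrative only; it assumes at least two visible devices and turns on direct peer access when the hardware supports it):

#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int n_dev = 0;
    cudaGetDeviceCount(&n_dev);
    if (n_dev < 2) { printf("need 2 GPUs\n"); return 0; }
    const size_t sz = 1024 * sizeof(float);
    float *buf0, *buf1;
    cudaSetDevice(0); cudaMalloc(&buf0, sz);
    cudaSetDevice(1); cudaMalloc(&buf1, sz);
    int can = 0;
    cudaDeviceCanAccessPeer(&can, 1, 0);          // can device 1 read device 0 directly?
    if (can) { cudaSetDevice(1); cudaDeviceEnablePeerAccess(0, 0); }
    // copy the "halo" from device 0 to device 1; the runtime falls back to
    // staging through the host when direct peer access is unavailable
    cudaMemcpyPeerAsync(buf1, 1, buf0, 0, sz);
    cudaDeviceSynchronize();
    cudaSetDevice(0); cudaFree(buf0);
    cudaSetDevice(1); cudaFree(buf1);
    return 0;
}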
edad64c9c440f871f7b4cad77014fb17f5b5aaad.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_rndwr_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *buffer = NULL; hipMalloc(&buffer, XSIZE*YSIZE*sizeof(int)); size_t reps = 1; size_t steps = 1; size_t elements = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_rndwr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,reps,steps,elements); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_rndwr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,reps,steps,elements); } hipDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_rndwr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,reps,steps,elements); } hipDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; hipFree(buffer); } }}
edad64c9c440f871f7b4cad77014fb17f5b5aaad.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_rndwr_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *buffer = NULL; cudaMalloc(&buffer, XSIZE*YSIZE*sizeof(int)); size_t reps = 1; size_t steps = 1; size_t elements = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_rndwr_kernel<<<gridBlock,threadBlock>>>(buffer,reps,steps,elements); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_rndwr_kernel<<<gridBlock,threadBlock>>>(buffer,reps,steps,elements); } cudaDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_rndwr_kernel<<<gridBlock,threadBlock>>>(buffer,reps,steps,elements); } cudaDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; cudaFree(buffer); } }}
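Both versions of this harness round XSIZE and YSIZE up to the next multiple of the block dimensions with increment loops before dividing. A constant-time equivalent (a sketch; ceil_div is not part of the generated benchmark) is a ceiling division:

#include <cassert>

// number of blocks of size b needed to cover a elements
static inline int ceil_div(int a, int b) { return (a + b - 1) / b; }

int main() {
    // matches the loop-based rounding, e.g. for the 240x240 matrix:
    assert(ceil_div(240, 8) == 30);   // 240 is already a multiple of 8
    assert(ceil_div(240, 32) == 8);   // the loop rounds 240 up to 256, and 256/32 == 8
    return 0;
}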
d62c837dd83555809cd6681a839f0df1ed7bfb9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> __global__ void Reduce(int* in_data, int* out_data) { extern __shared__ int shared_data[]; unsigned int tid = threadIdx.x; unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; shared_data[tid] = in_data[index]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { shared_data[tid] += shared_data[tid + s]; } __syncthreads(); } if (tid == 0) { out_data[blockIdx.x] = shared_data[0]; } } int main() { const int block_size = 256; // __shared__ int shared_data[]; const int array_size = 1 << 22; int* h_array = new int[array_size]; for (int i = 0; i < array_size; ++i) { h_array[i] = 1; } int* d_array; hipMalloc(&d_array, sizeof(int) * array_size); hipMemcpy(d_array, h_array, sizeof(int) * array_size, hipMemcpyHostToDevice); int num_blocks = array_size / block_size; int* d_blocksum; hipMalloc(&d_blocksum, sizeof(int) * num_blocks); int* h_blocksum = new int[num_blocks]; hipEvent_t start; hipEvent_t stop; // Creating event hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( Reduce), dim3(num_blocks), dim3(block_size), sizeof(int) * block_size, 0, d_array, d_blocksum); hipEventRecord(stop); hipMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, hipMemcpyDeviceToHost); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << " elapsed" << std::endl; int sum = 0; for (int i = 0; i < num_blocks; ++i) { sum += h_blocksum[i]; } std::cout << sum << std::endl; hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_blocksum); hipFree(d_array); delete[] h_array; delete[] h_blocksum; }
d62c837dd83555809cd6681a839f0df1ed7bfb9f.cu
#include <iostream> __global__ void Reduce(int* in_data, int* out_data) { extern __shared__ int shared_data[]; unsigned int tid = threadIdx.x; unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; shared_data[tid] = in_data[index]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { shared_data[tid] += shared_data[tid + s]; } __syncthreads(); } if (tid == 0) { out_data[blockIdx.x] = shared_data[0]; } } int main() { const int block_size = 256; // __shared__ int shared_data[]; const int array_size = 1 << 22; int* h_array = new int[array_size]; for (int i = 0; i < array_size; ++i) { h_array[i] = 1; } int* d_array; cudaMalloc(&d_array, sizeof(int) * array_size); cudaMemcpy(d_array, h_array, sizeof(int) * array_size, cudaMemcpyHostToDevice); int num_blocks = array_size / block_size; int* d_blocksum; cudaMalloc(&d_blocksum, sizeof(int) * num_blocks); int* h_blocksum = new int[num_blocks]; cudaEvent_t start; cudaEvent_t stop; // Creating event cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); Reduce<<<num_blocks, block_size, sizeof(int) * block_size>>>(d_array, d_blocksum); cudaEventRecord(stop); cudaMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << " elapsed" << std::endl; int sum = 0; for (int i = 0; i < num_blocks; ++i) { sum += h_blocksum[i]; } std::cout << sum << std::endl; cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_blocksum); cudaFree(d_array); delete[] h_array; delete[] h_blocksum; }
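The in-block tree reduction halves s every pass, so it assumes blockDim.x is a power of two (256 here), and the unguarded load shared_data[tid] = in_data[index] assumes array_size is an exact multiple of block_size. Both hold in this program, and since every input element is 1, the printed result can be checked by hand:

\text{num\_blocks} = \frac{2^{22}}{256} = 16384, \qquad \text{sum} = \sum_{b=0}^{16383} 256 = 16384 \cdot 256 = 2^{22} = 4194304.

Note the kernel only produces per-block partial sums; the final accumulation over h_blocksum happens on the CPU.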
e0960e2dafb449a449ab2e5113e858584bad6d2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void _bcnn_backward_depthwise_conv_data_kernel( int nthreads, float *dst_grad, float *weight_data, int batch_size, const int channels, int dst_h, int dst_w, const int src_h, const int src_w, int kernel_sz, int stride, int pad, float *src_grad) { int i, n, c, h, w, kw, kh, h_out_s, w_out_s, h_out, w_out, offset; float value = 0.0f; float *weight = NULL; for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads; i += blockDim.x * gridDim.x) { n = i / channels / src_h / src_w; c = (i / src_h / src_w) % channels; h = (i / src_w) % src_h; w = i % src_w; weight = weight_data + c * kernel_sz * kernel_sz; value = 0.0f; for (kh = 0; kh < kernel_sz; ++kh) { for (kw = 0; kw < kernel_sz; ++kw) { h_out_s = h + pad - kh; w_out_s = w + pad - kw; if (((h_out_s % stride) == 0) && ((w_out_s % stride) == 0)) { h_out = h_out_s / stride; w_out = w_out_s / stride; if ((h_out >= 0) && (h_out < dst_h) && (w_out >= 0) && (w_out < dst_w)) { offset = ((n * channels + c) * dst_h + h_out) * dst_w + w_out; value += (*weight) * dst_grad[offset]; } } ++weight; } } src_grad[i] += value; } }
e0960e2dafb449a449ab2e5113e858584bad6d2c.cu
#include "includes.h" __global__ void _bcnn_backward_depthwise_conv_data_kernel( int nthreads, float *dst_grad, float *weight_data, int batch_size, const int channels, int dst_h, int dst_w, const int src_h, const int src_w, int kernel_sz, int stride, int pad, float *src_grad) { int i, n, c, h, w, kw, kh, h_out_s, w_out_s, h_out, w_out, offset; float value = 0.0f; float *weight = NULL; for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads; i += blockDim.x * gridDim.x) { n = i / channels / src_h / src_w; c = (i / src_h / src_w) % channels; h = (i / src_w) % src_h; w = i % src_w; weight = weight_data + c * kernel_sz * kernel_sz; value = 0.0f; for (kh = 0; kh < kernel_sz; ++kh) { for (kw = 0; kw < kernel_sz; ++kw) { h_out_s = h + pad - kh; w_out_s = w + pad - kw; if (((h_out_s % stride) == 0) && ((w_out_s % stride) == 0)) { h_out = h_out_s / stride; w_out = w_out_s / stride; if ((h_out >= 0) && (h_out < dst_h) && (w_out >= 0) && (w_out < dst_w)) { offset = ((n * channels + c) * dst_h + h_out) * dst_w + w_out; value += (*weight) * dst_grad[offset]; } } ++weight; } } src_grad[i] += value; } }
3135407af054df95a08b76babedb5b69a07891af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace { template<typename T> __device__ T BilinearInterpolate(const T* channel_dptr, const int32_t height, const int32_t width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int32_t y_low = static_cast<int32_t>(y); int32_t x_low = static_cast<int32_t>(x); int32_t y_high = 0; int32_t x_high = 0; if (y_low >= height - 1) { y_low = height - 1; y_high = y_low; y = static_cast<T>(y_low); } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_low = width - 1; x_high = x_low; x = static_cast<T>(x_low); } else { x_high = x_low + 1; } const T ly = y - y_low; const T lx = x - x_low; const T hy = 1.f - ly; const T hx = 1.f - lx; // https://en.wikipedia.org/wiki/Bilinear_interpolation const int64_t q11 = y_low * width + x_low; const int64_t q21 = y_low * width + x_high; const int64_t q12 = y_high * width + x_low; const int64_t q22 = y_high * width + x_high; // no 1 / (x_high - x_low) * (y_high - y_low) because it will always be 1 in RoI Align return (hy * hx) * channel_dptr[q11] + (hy * lx) * channel_dptr[q21] + (ly * hx) * channel_dptr[q12] + (ly * lx) * channel_dptr[q22]; } template<typename T> __device__ bool BilinearInterpolateDiff(const T bin_diff_avg, const int64_t height, const int64_t width, T y, T x, T& diff11, T& diff21, T& diff12, T& diff22, int32_t& x_low, int32_t& x_high, int32_t& y_low, int32_t& y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return false; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } y_low = static_cast<int32_t>(y); x_low = static_cast<int32_t>(x); if (y_low >= height - 1) { y_low = height - 1; y_high = y_low; y = static_cast<T>(y_low); } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_low = width - 1; x_high = x_low; x = static_cast<T>(x_low); } else { x_high = x_low + 1; } const T ly = y - y_low; const T lx = x - x_low; const T hy = 1.f - ly; const T hx = 1.f - lx; diff11 = bin_diff_avg * hy * hx; diff21 = bin_diff_avg * hy * lx; diff12 = bin_diff_avg * ly * hx; diff22 = bin_diff_avg * ly * lx; return true; } template<typename T> __global__ void RoiAlignForward(const int64_t nthreads, const T* in_dptr, const T* rois_dptr, const T spatial_scale, const int32_t sampling_ratio, const int64_t channel_num, const int64_t height, const int64_t width, const int64_t pooled_height, const int64_t pooled_width, const bool aligned, T* out_dptr) { const int64_t pooled_area = pooled_height * pooled_width; const int64_t channel_pooled_area = channel_num * pooled_height * pooled_width; CUDA_1D_KERNEL_LOOP(index, nthreads) { const int64_t h = (index / pooled_width) % pooled_height; const int64_t w = index % pooled_width; const int64_t c = (index / pooled_area) 
% channel_num; const int64_t r = index / channel_pooled_area; const T* offset_rois_dptr = rois_dptr + r * 5; const int64_t n = static_cast<int64_t>(offset_rois_dptr[0]); const T align_offset = aligned ? static_cast<T>(0.5) : static_cast<T>(0.f); const T roi_start_w = offset_rois_dptr[1] * spatial_scale - align_offset; const T roi_start_h = offset_rois_dptr[2] * spatial_scale - align_offset; const T roi_end_w = offset_rois_dptr[3] * spatial_scale - align_offset; const T roi_end_h = offset_rois_dptr[4] * spatial_scale - align_offset; T roi_height = roi_end_h - roi_start_h; T roi_width = roi_end_w - roi_start_w; // aligned == false is for compatibility. the argument "aligned" doesn't have the semantic of // determining minimum roi size if (aligned == false) { roi_height = max(roi_height, static_cast<T>(1.0)); roi_width = max(roi_width, static_cast<T>(1.0)); } const T bin_height = static_cast<T>(roi_height) / static_cast<T>(pooled_height); const T bin_width = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const int32_t bin_grid_height = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); const int32_t bin_grid_width = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = max(bin_grid_height * bin_grid_width, 1); const T* channel_dptr = in_dptr + (n * channel_num + c) * height * width; T out_val = 0.0; FOR_RANGE(int64_t, grid_i, 0, bin_grid_height) { // + .5f for center position T y = roi_start_h + h * bin_height + static_cast<T>(grid_i + 0.5f) * bin_height / static_cast<T>(bin_grid_height); FOR_RANGE(int64_t, grid_j, 0, bin_grid_width) { T x = roi_start_w + w * bin_width + static_cast<T>(grid_j + 0.5f) * bin_width / static_cast<T>(bin_grid_width); out_val += BilinearInterpolate(channel_dptr, height, width, y, x); } } out_dptr[index] = out_val / count; } } template<typename T> __global__ void RoiAlignBackward(const int64_t nthreads, const T* out_diff_dptr, const T* rois_dptr, const T spatial_scale, const int32_t sampling_ratio, const int64_t channel_num, const int64_t height, const int64_t width, const int64_t pooled_height, const int64_t pooled_width, const bool aligned, T* in_diff_dptr) { const int64_t pooled_area = pooled_height * pooled_width; const int64_t channel_pooled_area = channel_num * pooled_height * pooled_width; CUDA_1D_KERNEL_LOOP(index, nthreads) { const int64_t h = (index / pooled_width) % pooled_height; const int64_t w = index % pooled_width; const int64_t c = (index / pooled_area) % channel_num; const int64_t r = index / channel_pooled_area; const T* offset_rois_dptr = rois_dptr + r * 5; const int64_t n = static_cast<int64_t>(offset_rois_dptr[0]); const T align_offset = aligned ? static_cast<T>(0.5) : static_cast<T>(0.f); const T roi_start_w = offset_rois_dptr[1] * spatial_scale - align_offset; const T roi_start_h = offset_rois_dptr[2] * spatial_scale - align_offset; const T roi_end_w = offset_rois_dptr[3] * spatial_scale - align_offset; const T roi_end_h = offset_rois_dptr[4] * spatial_scale - align_offset; T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; // aligned == false is for compatibility. 
the argument "aligned" doesn't have the semantic of // determining minimum roi size if (aligned == false) { roi_height = max(roi_height, static_cast<T>(1.0)); roi_width = max(roi_width, static_cast<T>(1.0)); } const T bin_height = static_cast<T>(roi_height) / static_cast<T>(pooled_height); const T bin_width = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const int32_t bin_grid_height = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); const int32_t bin_grid_width = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = max(bin_grid_height * bin_grid_width, 1); const T bin_diff_avg = out_diff_dptr[index] / count; T* in_diff_channel_dptr = in_diff_dptr + (n * channel_num + c) * height * width; FOR_RANGE(int64_t, grid_i, 0, bin_grid_height) { // + .5f for center position T y = roi_start_h + h * bin_height + static_cast<T>(grid_i + 0.5f) * bin_height / static_cast<T>(bin_grid_height); FOR_RANGE(int64_t, grid_j, 0, bin_grid_width) { T x = roi_start_w + w * bin_width + static_cast<T>(grid_j + 0.5f) * bin_width / static_cast<T>(bin_grid_width); T diff11 = 0; T diff21 = 0; T diff12 = 0; T diff22 = 0; int32_t x_low = 0; int32_t x_high = 0; int32_t y_low = 0; int32_t y_high = 0; bool has_diff = BilinearInterpolateDiff(bin_diff_avg, height, width, y, x, diff11, diff21, diff12, diff22, x_low, x_high, y_low, y_high); if (has_diff) { const int64_t q11 = y_low * width + x_low; const int64_t q21 = y_low * width + x_high; const int64_t q12 = y_high * width + x_low; const int64_t q22 = y_high * width + x_high; atomicAdd(in_diff_channel_dptr + q11, diff11); atomicAdd(in_diff_channel_dptr + q21, diff21); atomicAdd(in_diff_channel_dptr + q12, diff12); atomicAdd(in_diff_channel_dptr + q22, diff22); } } } } } } // namespace template<typename T> class RoIAlignKernel final : public user_op::OpKernel { public: RoIAlignKernel() = default; ~RoIAlignKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* x_blob = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* rois_blob = ctx->Tensor4ArgNameAndIndex("rois", 0); if (rois_blob->shape_view().elem_cnt() == 0) { return; } user_op::Tensor* y_blob = ctx->Tensor4ArgNameAndIndex("y", 0); const int32_t pooled_h = ctx->Attr<int32_t>("pooled_h"); const int32_t pooled_w = ctx->Attr<int32_t>("pooled_w"); const float spatial_scale = ctx->Attr<float>("spatial_scale"); const int32_t sampling_ratio = ctx->Attr<int32_t>("sampling_ratio"); const bool aligned = ctx->Attr<bool>("aligned"); const int64_t elem_cnt = y_blob->shape_view().elem_cnt(); hipLaunchKernelGGL(( RoiAlignForward<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), elem_cnt, x_blob->dptr<T>(), rois_blob->dptr<T>(), spatial_scale, sampling_ratio, x_blob->shape_view().At(1), x_blob->shape_view().At(2), x_blob->shape_view().At(3), pooled_h, pooled_w, aligned, y_blob->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<typename T> class RoIAlignGradKernel final : public user_op::OpKernel { public: RoIAlignGradKernel() = default; ~RoIAlignGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { user_op::Tensor* dx_blob = ctx->Tensor4ArgNameAndIndex("dx", 0); if (dx_blob == nullptr) { return; } Memset<DeviceType::kCUDA>(ctx->stream(), 
dx_blob->mut_dptr<T>(), 0, dx_blob->shape_view().elem_cnt() * sizeof(T)); const user_op::Tensor* dy_blob = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* rois_blob = ctx->Tensor4ArgNameAndIndex("rois", 0); const int32_t pooled_h = ctx->Attr<int32_t>("pooled_h"); const int32_t pooled_w = ctx->Attr<int32_t>("pooled_w"); const float spatial_scale = ctx->Attr<float>("spatial_scale"); const int32_t sampling_ratio = ctx->Attr<int32_t>("sampling_ratio"); const bool aligned = ctx->Attr<bool>("aligned"); const int64_t elem_cnt = dy_blob->shape_view().elem_cnt(); if (elem_cnt > 0) { hipLaunchKernelGGL(( RoiAlignBackward<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), elem_cnt, dy_blob->dptr<T>(), rois_blob->dptr<T>(), spatial_scale, sampling_ratio, dx_blob->shape_view().At(1), dx_blob->shape_view().At(2), dx_blob->shape_view().At(3), pooled_h, pooled_w, aligned, dx_blob->mut_dptr<T>()); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; REGISTER_USER_KERNEL("roi_align") .SetCreateFn<RoIAlignKernel<float>>() .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kCUDA); REGISTER_USER_KERNEL("roi_align_grad") .SetCreateFn<RoIAlignGradKernel<float>>() .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kCUDA); } // namespace oneflow
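BilinearInterpolate above evaluates the standard bilinear formula on a unit cell, which is why the kernel's comment notes that the 1/((x_high - x_low)(y_high - y_low)) factor is always 1. With l_x = x - x_{low}, h_x = 1 - l_x (and likewise for y), the sampled value is

f(x, y) = h_y h_x\, f_{11} + h_y l_x\, f_{21} + l_y h_x\, f_{12} + l_y l_x\, f_{22},

where f_{11}, f_{21}, f_{12}, f_{22} are the four neighboring pixels at q11, q21, q12, q22. The four weights satisfy (h_y + l_y)(h_x + l_x) = 1, so BilinearInterpolateDiff can distribute bin_diff_avg across the same four locations in the backward pass without changing the total gradient mass; the atomicAdd calls are required there because the sample neighborhoods of different threads overlap in the input gradient.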
3135407af054df95a08b76babedb5b69a07891af.cu
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"

namespace oneflow {

namespace {

template<typename T>
__device__ T BilinearInterpolate(const T* channel_dptr, const int32_t height, const int32_t width,
                                 T y, T x) {
  if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; }
  if (y <= 0) { y = 0; }
  if (x <= 0) { x = 0; }
  int32_t y_low = static_cast<int32_t>(y);
  int32_t x_low = static_cast<int32_t>(x);
  int32_t y_high = 0;
  int32_t x_high = 0;
  if (y_low >= height - 1) {
    y_low = height - 1;
    y_high = y_low;
    y = static_cast<T>(y_low);
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_low = width - 1;
    x_high = x_low;
    x = static_cast<T>(x_low);
  } else {
    x_high = x_low + 1;
  }
  const T ly = y - y_low;
  const T lx = x - x_low;
  const T hy = 1.f - ly;
  const T hx = 1.f - lx;
  // https://en.wikipedia.org/wiki/Bilinear_interpolation
  const int64_t q11 = y_low * width + x_low;
  const int64_t q21 = y_low * width + x_high;
  const int64_t q12 = y_high * width + x_low;
  const int64_t q22 = y_high * width + x_high;
  // no 1 / (x_high - x_low) * (y_high - y_low) because it will always be 1 in RoI Align
  return (hy * hx) * channel_dptr[q11] + (hy * lx) * channel_dptr[q21]
         + (ly * hx) * channel_dptr[q12] + (ly * lx) * channel_dptr[q22];
}

template<typename T>
__device__ bool BilinearInterpolateDiff(const T bin_diff_avg, const int64_t height,
                                        const int64_t width, T y, T x, T& diff11, T& diff21,
                                        T& diff12, T& diff22, int32_t& x_low, int32_t& x_high,
                                        int32_t& y_low, int32_t& y_high) {
  if (y < -1.0 || y > height || x < -1.0 || x > width) { return false; }
  if (y <= 0) { y = 0; }
  if (x <= 0) { x = 0; }
  y_low = static_cast<int32_t>(y);
  x_low = static_cast<int32_t>(x);
  if (y_low >= height - 1) {
    y_low = height - 1;
    y_high = y_low;
    y = static_cast<T>(y_low);
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_low = width - 1;
    x_high = x_low;
    x = static_cast<T>(x_low);
  } else {
    x_high = x_low + 1;
  }
  const T ly = y - y_low;
  const T lx = x - x_low;
  const T hy = 1.f - ly;
  const T hx = 1.f - lx;
  diff11 = bin_diff_avg * hy * hx;
  diff21 = bin_diff_avg * hy * lx;
  diff12 = bin_diff_avg * ly * hx;
  diff22 = bin_diff_avg * ly * lx;
  return true;
}

template<typename T>
__global__ void RoiAlignForward(const int64_t nthreads, const T* in_dptr, const T* rois_dptr,
                                const T spatial_scale, const int32_t sampling_ratio,
                                const int64_t channel_num, const int64_t height,
                                const int64_t width, const int64_t pooled_height,
                                const int64_t pooled_width, const bool aligned, T* out_dptr) {
  const int64_t pooled_area = pooled_height * pooled_width;
  const int64_t channel_pooled_area = channel_num * pooled_height * pooled_width;
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    const int64_t h = (index / pooled_width) % pooled_height;
    const int64_t w = index % pooled_width;
    const int64_t c = (index / pooled_area) % channel_num;
    const int64_t r = index / channel_pooled_area;
    const T* offset_rois_dptr = rois_dptr + r * 5;
    const int64_t n = static_cast<int64_t>(offset_rois_dptr[0]);
    const T align_offset = aligned ? static_cast<T>(0.5) : static_cast<T>(0.f);
    const T roi_start_w = offset_rois_dptr[1] * spatial_scale - align_offset;
    const T roi_start_h = offset_rois_dptr[2] * spatial_scale - align_offset;
    const T roi_end_w = offset_rois_dptr[3] * spatial_scale - align_offset;
    const T roi_end_h = offset_rois_dptr[4] * spatial_scale - align_offset;
    T roi_height = roi_end_h - roi_start_h;
    T roi_width = roi_end_w - roi_start_w;
    // aligned == false is for compatibility. the argument "aligned" doesn't have the semantic of
    // determining minimum roi size
    if (aligned == false) {
      roi_height = max(roi_height, static_cast<T>(1.0));
      roi_width = max(roi_width, static_cast<T>(1.0));
    }
    const T bin_height = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    const T bin_width = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    const int32_t bin_grid_height =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);
    const int32_t bin_grid_width =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = max(bin_grid_height * bin_grid_width, 1);
    const T* channel_dptr = in_dptr + (n * channel_num + c) * height * width;
    T out_val = 0.0;
    FOR_RANGE(int64_t, grid_i, 0, bin_grid_height) {
      // + .5f for center position
      T y = roi_start_h + h * bin_height
            + static_cast<T>(grid_i + 0.5f) * bin_height / static_cast<T>(bin_grid_height);
      FOR_RANGE(int64_t, grid_j, 0, bin_grid_width) {
        T x = roi_start_w + w * bin_width
              + static_cast<T>(grid_j + 0.5f) * bin_width / static_cast<T>(bin_grid_width);
        out_val += BilinearInterpolate(channel_dptr, height, width, y, x);
      }
    }
    out_dptr[index] = out_val / count;
  }
}

template<typename T>
__global__ void RoiAlignBackward(const int64_t nthreads, const T* out_diff_dptr,
                                 const T* rois_dptr, const T spatial_scale,
                                 const int32_t sampling_ratio, const int64_t channel_num,
                                 const int64_t height, const int64_t width,
                                 const int64_t pooled_height, const int64_t pooled_width,
                                 const bool aligned, T* in_diff_dptr) {
  const int64_t pooled_area = pooled_height * pooled_width;
  const int64_t channel_pooled_area = channel_num * pooled_height * pooled_width;
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    const int64_t h = (index / pooled_width) % pooled_height;
    const int64_t w = index % pooled_width;
    const int64_t c = (index / pooled_area) % channel_num;
    const int64_t r = index / channel_pooled_area;
    const T* offset_rois_dptr = rois_dptr + r * 5;
    const int64_t n = static_cast<int64_t>(offset_rois_dptr[0]);
    const T align_offset = aligned ? static_cast<T>(0.5) : static_cast<T>(0.f);
    const T roi_start_w = offset_rois_dptr[1] * spatial_scale - align_offset;
    const T roi_start_h = offset_rois_dptr[2] * spatial_scale - align_offset;
    const T roi_end_w = offset_rois_dptr[3] * spatial_scale - align_offset;
    const T roi_end_h = offset_rois_dptr[4] * spatial_scale - align_offset;
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    // aligned == false is for compatibility. the argument "aligned" doesn't have the semantic of
    // determining minimum roi size
    if (aligned == false) {
      roi_height = max(roi_height, static_cast<T>(1.0));
      roi_width = max(roi_width, static_cast<T>(1.0));
    }
    const T bin_height = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    const T bin_width = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    const int32_t bin_grid_height =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);
    const int32_t bin_grid_width =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = max(bin_grid_height * bin_grid_width, 1);
    const T bin_diff_avg = out_diff_dptr[index] / count;
    T* in_diff_channel_dptr = in_diff_dptr + (n * channel_num + c) * height * width;
    FOR_RANGE(int64_t, grid_i, 0, bin_grid_height) {
      // + .5f for center position
      T y = roi_start_h + h * bin_height
            + static_cast<T>(grid_i + 0.5f) * bin_height / static_cast<T>(bin_grid_height);
      FOR_RANGE(int64_t, grid_j, 0, bin_grid_width) {
        T x = roi_start_w + w * bin_width
              + static_cast<T>(grid_j + 0.5f) * bin_width / static_cast<T>(bin_grid_width);
        T diff11 = 0;
        T diff21 = 0;
        T diff12 = 0;
        T diff22 = 0;
        int32_t x_low = 0;
        int32_t x_high = 0;
        int32_t y_low = 0;
        int32_t y_high = 0;
        bool has_diff = BilinearInterpolateDiff(bin_diff_avg, height, width, y, x, diff11, diff21,
                                                diff12, diff22, x_low, x_high, y_low, y_high);
        if (has_diff) {
          const int64_t q11 = y_low * width + x_low;
          const int64_t q21 = y_low * width + x_high;
          const int64_t q12 = y_high * width + x_low;
          const int64_t q22 = y_high * width + x_high;
          atomicAdd(in_diff_channel_dptr + q11, diff11);
          atomicAdd(in_diff_channel_dptr + q21, diff21);
          atomicAdd(in_diff_channel_dptr + q12, diff12);
          atomicAdd(in_diff_channel_dptr + q22, diff22);
        }
      }
    }
  }
}

}  // namespace

template<typename T>
class RoIAlignKernel final : public user_op::OpKernel {
 public:
  RoIAlignKernel() = default;
  ~RoIAlignKernel() = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* x_blob = ctx->Tensor4ArgNameAndIndex("x", 0);
    const user_op::Tensor* rois_blob = ctx->Tensor4ArgNameAndIndex("rois", 0);
    if (rois_blob->shape_view().elem_cnt() == 0) { return; }
    user_op::Tensor* y_blob = ctx->Tensor4ArgNameAndIndex("y", 0);
    const int32_t pooled_h = ctx->Attr<int32_t>("pooled_h");
    const int32_t pooled_w = ctx->Attr<int32_t>("pooled_w");
    const float spatial_scale = ctx->Attr<float>("spatial_scale");
    const int32_t sampling_ratio = ctx->Attr<int32_t>("sampling_ratio");
    const bool aligned = ctx->Attr<bool>("aligned");
    const int64_t elem_cnt = y_blob->shape_view().elem_cnt();
    RoiAlignForward<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
                         ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
        elem_cnt, x_blob->dptr<T>(), rois_blob->dptr<T>(), spatial_scale, sampling_ratio,
        x_blob->shape_view().At(1), x_blob->shape_view().At(2), x_blob->shape_view().At(3),
        pooled_h, pooled_w, aligned, y_blob->mut_dptr<T>());
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

template<typename T>
class RoIAlignGradKernel final : public user_op::OpKernel {
 public:
  RoIAlignGradKernel() = default;
  ~RoIAlignGradKernel() = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    user_op::Tensor* dx_blob = ctx->Tensor4ArgNameAndIndex("dx", 0);
    if (dx_blob == nullptr) { return; }
    Memset<DeviceType::kCUDA>(ctx->stream(), dx_blob->mut_dptr<T>(), 0,
                              dx_blob->shape_view().elem_cnt() * sizeof(T));
    const user_op::Tensor* dy_blob = ctx->Tensor4ArgNameAndIndex("dy", 0);
    const user_op::Tensor* rois_blob = ctx->Tensor4ArgNameAndIndex("rois", 0);
    const int32_t pooled_h = ctx->Attr<int32_t>("pooled_h");
    const int32_t pooled_w = ctx->Attr<int32_t>("pooled_w");
    const float spatial_scale = ctx->Attr<float>("spatial_scale");
    const int32_t sampling_ratio = ctx->Attr<int32_t>("sampling_ratio");
    const bool aligned = ctx->Attr<bool>("aligned");
    const int64_t elem_cnt = dy_blob->shape_view().elem_cnt();
    if (elem_cnt > 0) {
      RoiAlignBackward<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
                            ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
          elem_cnt, dy_blob->dptr<T>(), rois_blob->dptr<T>(), spatial_scale, sampling_ratio,
          dx_blob->shape_view().At(1), dx_blob->shape_view().At(2), dx_blob->shape_view().At(3),
          pooled_h, pooled_w, aligned, dx_blob->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

REGISTER_USER_KERNEL("roi_align")
    .SetCreateFn<RoIAlignKernel<float>>()
    .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kCUDA);

REGISTER_USER_KERNEL("roi_align_grad")
    .SetCreateFn<RoIAlignGradKernel<float>>()
    .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kCUDA);

}  // namespace oneflow
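Both kernels above decompose a flat output index into (roi, channel, h, w) coordinates with the same division/modulo chain. A minimal host-side sketch of that arithmetic, using a hypothetical 4x3x7x7 output shape chosen only for the check, verifying that the decomposition round-trips:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical shape: 4 RoIs, 3 channels, 7x7 pooled output.
  const int64_t roi_num = 4, channel_num = 3, pooled_height = 7, pooled_width = 7;
  const int64_t pooled_area = pooled_height * pooled_width;
  const int64_t channel_pooled_area = channel_num * pooled_area;
  for (int64_t index = 0; index < roi_num * channel_pooled_area; ++index) {
    // Same decomposition as RoiAlignForward / RoiAlignBackward.
    const int64_t h = (index / pooled_width) % pooled_height;
    const int64_t w = index % pooled_width;
    const int64_t c = (index / pooled_area) % channel_num;
    const int64_t r = index / channel_pooled_area;
    // Recomposing the coordinates must give back the original flat index.
    assert(index == ((r * channel_num + c) * pooled_height + h) * pooled_width + w);
  }
  return 0;
}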
e60bb9a9ddd8b606c3ff45507e49cb90fb7f9ffd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void minusScalar(float* in, float* out, float minus, int size)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (; tid < size; tid += stride)
        if (tid < size)
            out[tid] = in[tid] - minus;
}
e60bb9a9ddd8b606c3ff45507e49cb90fb7f9ffd.cu
#include "includes.h" __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; }
093ed07510e1092c405248682d8dd82300093bcf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef TAU_KERNELS_CU
#define TAU_KERNELS_CU

////////////////////////////////////////////////////////////////
//
// This is where the main Cuda kernels are declared
// along with C++ wrappers, so that C++ files can
// be compiled which call them. The only difference
// between calling the Cuda kernel and the C++
// wrapper is that the grid and block dimensions
// are defined in the first two arguments of the
// wrapper. After that, everything is the same.
//
// The main two are the main multiplication kernel
// and the trace kernel.
//
////////////////////////////////////////////////////////////////

#include "Tau_Kernels.h"

void Multiply(dim3 dimGrid, dim3 dimBlock, cuFloatComplex *I, cuFloatComplex *T, cuFloatComplex *X)
{
    hipLaunchKernelGGL((MultiplyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, I, T, X);
}

__global__ void MultiplyKernel(cuFloatComplex *I, cuFloatComplex *T, cuFloatComplex *X)
{
#ifdef TEST_DEFS
    int k;
#ifdef MULTIPLY_DIMS_1
    int h = threadIdx.z/C_MATRIX_DIM;
    int i = threadIdx.z/2;
    int d = threadIdx.y;
    int t = threadIdx.x;
    int y = gridDim.y;
    int x = gridDim.x;
    int size = h*C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM
             + i*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM
             + d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM
             + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + y*PHI_MATRIX_DIM + x;
#else
    int size = blockIdx.y*gridDim.x*blockDim.z*blockDim.y*blockDim.x
             + blockIdx.x*blockDim.z*blockDim.y*blockDim.x
             + threadIdx.z*blockDim.y*blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
    //int size = blockIdx.x*blockDim.x + threadIdx.x;
    //I like this sneaky trick.
    //There's probably a MUCH better way of doing this though...
    int x = size;
    int h = static_cast<int>( x/(C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= h*C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int i = static_cast<int>( x/(DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= i*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int d = static_cast<int>( x/(T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int t = static_cast<int>( x/(PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= t*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int y = static_cast<int>( x/(PHI_MATRIX_DIM) );
    x -= y*PHI_MATRIX_DIM;
#endif
    cuFloatComplex Xtmp = make_cuFloatComplex(0,0);
    if(h==0) //T loop
    {
        #pragma unroll
        for(k=0; k<PHI_MATRIX_DIM;k++)
        {
            Xtmp = cuCaddf(Xtmp, cuCmulf(
                I[ i*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + y*PHI_MATRIX_DIM + k ],
                T[ d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + k*PHI_MATRIX_DIM + x ] ) );
        }
    }
    else //T^+ loop
    {
        #pragma unroll
        for(k=0; k<PHI_MATRIX_DIM;k++)
        {
            Xtmp = cuCaddf(Xtmp, cuCmulf(
                I[ i*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + y*PHI_MATRIX_DIM + k ],
                cuConjf(T[ d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + x*PHI_MATRIX_DIM + k ]) //complex conjugate transpose
                ) );
        }
    }
    X[size] = Xtmp;
    //X[size] = make_cuFloatComplex(x,y);
#endif
}

void Trace(dim3 dimGrid, dim3 dimBlock, hipComplex *C, hipComplex *X, int d)
{
    hipLaunchKernelGGL((TraceKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, C, X, d);
}

__global__ void TraceKernel(hipComplex *C, hipComplex *X, int d)
{
#ifdef TEST_DEFS
    int size = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
    //int size = blockIdx.x*blockDim.x + blockIdx.y*blockDim.y + threadIdx.x;
    /*int j = size;
    int d_it = (int) j/(C_MATRIX_DIM*C_MATRIX_DIM);
    j -= d_it*C_MATRIX_DIM*C_MATRIX_DIM;
    int i = (int) j/C_MATRIX_DIM;
    j -= i*C_MATRIX_DIM;*/
    int d_it = threadIdx.x;
    int i = blockIdx.x;
    int j = blockIdx.y;
    int a, b, t;
    int current_pos[2];
    cuFloatComplex Ctmp = make_cuFloatComplex(0,0);
    #pragma unroll
    for(t=0; t<T_CHUNKSIZE; t++) //WORK ON THIS!!
    {
        current_pos[0] = i*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + d_it*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
        current_pos[1] = j*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + d_it*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
        #pragma unroll
        for(a=0; a<PHI_MATRIX_DIM; a++)
        {
            #pragma unroll
            for(b=0; b<PHI_MATRIX_DIM; b++)
            {
                Ctmp = cuCaddf(Ctmp, cuCmulf(
                    X[ current_pos[0] + b*PHI_MATRIX_DIM + a ],
                    X[ C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + current_pos[1] + a*PHI_MATRIX_DIM + b ] ) );
            }
        }
    }
    C[d*C_MATRIX_DIM*C_MATRIX_DIM + size] = cuCaddf(C[d*C_MATRIX_DIM*C_MATRIX_DIM + size], Ctmp);
    //C[d*C_MATRIX_DIM*C_MATRIX_DIM + size] = make_cuFloatComplex(d,d*C_MATRIX_DIM*C_MATRIX_DIM + size);
#endif
}

#endif
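For readers comparing this hipified file with the CUDA original that follows: hipify rewrites every triple-chevron launch into hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedBytes, stream, args...). The two forms below are equivalent launches of the same kernel:

// CUDA original:
//     MultiplyKernel<<< dimGrid, dimBlock >>>(I, T, X);
// HIP translation (0 bytes of dynamic shared memory, default stream 0):
//     hipLaunchKernelGGL((MultiplyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, I, T, X);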
093ed07510e1092c405248682d8dd82300093bcf.cu
#ifndef TAU_KERNELS_CU
#define TAU_KERNELS_CU

////////////////////////////////////////////////////////////////
//
// This is where the main Cuda kernels are declared
// along with C++ wrappers, so that C++ files can
// be compiled which call them. The only difference
// between calling the Cuda kernel and the C++
// wrapper is that the grid and block dimensions
// are defined in the first two arguments of the
// wrapper. After that, everything is the same.
//
// The main two are the main multiplication kernel
// and the trace kernel.
//
////////////////////////////////////////////////////////////////

#include "Tau_Kernels.h"

void Multiply(dim3 dimGrid, dim3 dimBlock, cuFloatComplex *I, cuFloatComplex *T, cuFloatComplex *X)
{
    MultiplyKernel<<< dimGrid, dimBlock >>>(I, T, X);
}

__global__ void MultiplyKernel(cuFloatComplex *I, cuFloatComplex *T, cuFloatComplex *X)
{
#ifdef TEST_DEFS
    int k;
#ifdef MULTIPLY_DIMS_1
    int h = threadIdx.z/C_MATRIX_DIM;
    int i = threadIdx.z/2;
    int d = threadIdx.y;
    int t = threadIdx.x;
    int y = gridDim.y;
    int x = gridDim.x;
    int size = h*C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM
             + i*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM
             + d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM
             + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + y*PHI_MATRIX_DIM + x;
#else
    int size = blockIdx.y*gridDim.x*blockDim.z*blockDim.y*blockDim.x
             + blockIdx.x*blockDim.z*blockDim.y*blockDim.x
             + threadIdx.z*blockDim.y*blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
    //int size = blockIdx.x*blockDim.x + threadIdx.x;
    //I like this sneaky trick.
    //There's probably a MUCH better way of doing this though...
    int x = size;
    int h = static_cast<int>( x/(C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= h*C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int i = static_cast<int>( x/(DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= i*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int d = static_cast<int>( x/(T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int t = static_cast<int>( x/(PHI_MATRIX_DIM*PHI_MATRIX_DIM) );
    x -= t*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
    int y = static_cast<int>( x/(PHI_MATRIX_DIM) );
    x -= y*PHI_MATRIX_DIM;
#endif
    cuFloatComplex Xtmp = make_cuFloatComplex(0,0);
    if(h==0) //T loop
    {
        #pragma unroll
        for(k=0; k<PHI_MATRIX_DIM;k++)
        {
            Xtmp = cuCaddf(Xtmp, cuCmulf(
                I[ i*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + y*PHI_MATRIX_DIM + k ],
                T[ d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + k*PHI_MATRIX_DIM + x ] ) );
        }
    }
    else //T^+ loop
    {
        #pragma unroll
        for(k=0; k<PHI_MATRIX_DIM;k++)
        {
            Xtmp = cuCaddf(Xtmp, cuCmulf(
                I[ i*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + y*PHI_MATRIX_DIM + k ],
                cuConjf(T[ d*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM + x*PHI_MATRIX_DIM + k ]) //complex conjugate transpose
                ) );
        }
    }
    X[size] = Xtmp;
    //X[size] = make_cuFloatComplex(x,y);
#endif
}

void Trace(dim3 dimGrid, dim3 dimBlock, cuComplex *C, cuComplex *X, int d)
{
    TraceKernel<<< dimGrid, dimBlock >>>(C, X, d);
}

__global__ void TraceKernel(cuComplex *C, cuComplex *X, int d)
{
#ifdef TEST_DEFS
    int size = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
    //int size = blockIdx.x*blockDim.x + blockIdx.y*blockDim.y + threadIdx.x;
    /*int j = size;
    int d_it = (int) j/(C_MATRIX_DIM*C_MATRIX_DIM);
    j -= d_it*C_MATRIX_DIM*C_MATRIX_DIM;
    int i = (int) j/C_MATRIX_DIM;
    j -= i*C_MATRIX_DIM;*/
    int d_it = threadIdx.x;
    int i = blockIdx.x;
    int j = blockIdx.y;
    int a, b, t;
    int current_pos[2];
    cuFloatComplex Ctmp = make_cuFloatComplex(0,0);
    #pragma unroll
    for(t=0; t<T_CHUNKSIZE; t++) //WORK ON THIS!!
    {
        current_pos[0] = i*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + d_it*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
        current_pos[1] = j*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + d_it*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + t*PHI_MATRIX_DIM*PHI_MATRIX_DIM;
        #pragma unroll
        for(a=0; a<PHI_MATRIX_DIM; a++)
        {
            #pragma unroll
            for(b=0; b<PHI_MATRIX_DIM; b++)
            {
                Ctmp = cuCaddf(Ctmp, cuCmulf(
                    X[ current_pos[0] + b*PHI_MATRIX_DIM + a ],
                    X[ C_MATRIX_DIM*DELTA_CHUNKSIZE*T_CHUNKSIZE*PHI_MATRIX_DIM*PHI_MATRIX_DIM + current_pos[1] + a*PHI_MATRIX_DIM + b ] ) );
            }
        }
    }
    C[d*C_MATRIX_DIM*C_MATRIX_DIM + size] = cuCaddf(C[d*C_MATRIX_DIM*C_MATRIX_DIM + size], Ctmp);
    //C[d*C_MATRIX_DIM*C_MATRIX_DIM + size] = make_cuFloatComplex(d,d*C_MATRIX_DIM*C_MATRIX_DIM + size);
#endif
}

#endif
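As the header comment says, the point of the wrappers is that plain C++ translation units can trigger kernel launches without being compiled by nvcc. A minimal sketch of such a caller, assuming Tau_Kernels.h declares the Multiply wrapper and that the three device buffers were already allocated with cudaMalloc; the file name and launch shape are illustrative only:

// host_caller.cpp -- built with a host C++ compiler, linked against the .cu object file.
#include <cuda_runtime.h>
#include <cuComplex.h>
#include "Tau_Kernels.h"

void RunMultiply(cuFloatComplex* I, cuFloatComplex* T, cuFloatComplex* X) {
  dim3 dimGrid(8, 8);      // illustrative; the real shape depends on PHI_MATRIX_DIM
  dim3 dimBlock(4, 4, 4);  // and the chunk-size macros defined elsewhere in the project
  Multiply(dimGrid, dimBlock, I, T, X);  // same arguments as the kernel, plus launch dims
  cudaDeviceSynchronize();
}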
c64cc89c0f37eaa2afd23f4195a39f0104bbad09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" __global__ void compute_kernel_1 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp = (ONE * threadIdx.x); if(tid < num_threads) { tmp = tmp + tmp * CONST; out[tid] = tmp; } } __global__ void compute_kernel_2 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_4 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_8 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_16 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_32 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_64 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); 
if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_128 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_256 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_512 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = 
tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 
= tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; 
tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * 
CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_1024 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + 
tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 
+ tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = 
tmp2 + tmp2 * CONST;
        /* ... (remainder of the manually unrolled chain: the statement pair
         *   tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST;
         * repeats until this kernel has issued its advertised number of
         * floating-point operations) */
        out[tid] = tmp1 + tmp2;
    }
}
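/*
 * The compute_kernel_<N> family is a peak-FLOP microbenchmark: each thread
 * advances two independent FMA chains (tmp1, tmp2) so consecutive
 * multiply-adds can overlap, and finally stores their sum so the work stays
 * observable. A minimal host-side sketch (illustrative only; the buffer and
 * launch-geometry names below are assumptions, not taken from this file):
 *
 *     TYPE *d_out;
 *     hipMalloc(&d_out, num_threads * sizeof(TYPE));
 *     compute_kernel_2048<<<(num_threads + 255) / 256, 256>>>(
 *         num_threads, 2048, d_out);
 *     hipDeviceSynchronize();
 *     hipFree(d_out);
 */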
__global__ void compute_kernel_2048(int num_threads, int flops, TYPE *out) {
    /* Note: `flops` is unused here; the operation count is baked in at
     * compile time via the unroll bound below. */
    TYPE tmp1, tmp2;
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    tmp1 = (ONE * threadIdx.x);
    tmp2 = (TWO * threadIdx.x);
    if (tid < num_threads) {
        /* Originally a manually unrolled chain of FMA statements. A
         * constant-bound loop under #pragma unroll is equivalent: the
         * compiler fully unrolls it, so no loop overhead is reintroduced.
         * The bound of 1024 pairs (2048 statements) reads the _2048 suffix
         * as the total statement count. */
#pragma unroll
        for (int i = 0; i < 1024; ++i) {
            tmp1 = tmp1 + tmp1 * CONST;
            tmp2 = tmp2 + tmp2 * CONST;
        }
        /* Write the result so the dependent chains are not optimized away. */
        out[tid] = tmp1 + tmp2;
    }
}
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_4096 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST;
// The source repeats the two statements above verbatim for the whole of
// this region: a machine-generated, fully unrolled chain of dependent
// multiply-adds on tmp1/tmp2, i.e. serial FMA busy-work emitted as
// straight-line code to produce a fixed amount of compute without loop
// overhead. The unrolled repetitions in this span are collapsed into the
// equivalent counted loop below. SPIN_PAIRS is an estimate of how many
// tmp1/tmp2 statement pairs this span contained; it is NOT a value
// recovered from the source, so adjust it if the exact operation count
// matters. `#pragma unroll` asks the compiler to re-expand the loop back
// into the original straight-line form.
constexpr int SPIN_PAIRS = 1470; // estimate only, see note above
#pragma unroll
for (int i = 0; i < SPIN_PAIRS; ++i) {
    tmp1 = tmp1 + tmp1 * CONST;
    tmp2 = tmp2 + tmp2 * CONST;
}
tmp1 = tmp1 + tmp1 * CONST; tmp2
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_8192 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
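/*
 * Note on the compute_kernel_<N> family: each statement of the form
 * `x = x + x * CONST` compiles to a single fused multiply-add, i.e.
 * 2 FLOPs, and the statements alternate between the two accumulators
 * tmp1 and tmp2 so that two independent dependency chains stay in
 * flight in the pipeline. Assuming the <N> suffix counts FLOPs per
 * thread, compute_kernel_8192 unrolls 4096 such FMAs. A
 * behavior-equivalent compact form of the generated body would be the
 * sketch below (the trip count rests on that 8192-FLOPs-per-thread
 * reading):
 *
 *   #pragma unroll
 *   for (int i = 0; i < 2048; ++i) {  // 2048 iters * 2 FMAs * 2 FLOPs = 8192 FLOPs
 *       tmp1 = tmp1 + tmp1 * CONST;
 *       tmp2 = tmp2 + tmp2 * CONST;
 *   }
 *
 * Numerically each accumulator evolves as tmp0 * (1 + CONST)^n, so a
 * float TYPE needs a very small (or negative) CONST to stay finite
 * across thousands of steps; CONST and TYPE are defined elsewhere in
 * this source.
 */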
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST;
        // Busy-work: dependent FMA-style updates that keep the thread occupied
        // for a fixed amount of compute. NOTE: the iteration counts below are
        // estimates of the original fully-unrolled statement sequence (pairs of
        // tmp1/tmp2 updates followed by a tmp2-only tail); tune them to
        // reproduce the intended delay.
        const int PAIR_ITERS = 1600; // estimated count of unrolled tmp1/tmp2 update pairs
        const int TAIL_ITERS = 150;  // estimated count of trailing tmp2-only updates
#pragma unroll 1
        for (int i = 0; i < PAIR_ITERS; ++i) {
            tmp1 = tmp1 + tmp1 * CONST;
            tmp2 = tmp2 + tmp2 * CONST;
        }
#pragma unroll 1
        for (int i = 0; i < TAIL_ITERS; ++i) {
            tmp2 = tmp2 + tmp2 * CONST;
        }
        tmp2
= tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + 
tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = 
tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + 
tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = 
tmp2 + tmp2 * CONST;
        /* ...the original source continues this fully unrolled, data-dependent
           update chain, repeating the statement
               tmp2 = tmp2 + tmp2 * CONST;
           verbatim on the order of two thousand more times before the final
           store below... */
        tmp2 = tmp2 + tmp2 * CONST;
        out[tid] = tmp1 + tmp2;
    }
}
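The kernels above are device code only; the host driver and common.h are not part of this dump. The following is a minimal, self-contained sketch of how such a FLOPS microbenchmark is typically launched and timed. TYPE, ONE, TWO, and CONST are given hypothetical definitions here (common.h presumably supplies the real ones), and a copy of the two-update kernel is inlined so the sketch compiles on its own; it is an illustration, not the benchmark's actual harness.

    /* Hypothetical host driver for the compute_kernel_N family.
       ASSUMPTIONS (common.h is not shown): TYPE = float, ONE = 1.0f,
       TWO = 2.0f, CONST = 0.5f; grid/block sizes are illustrative. */
    #include <cstdio>
    #include <cuda_runtime.h>

    #define TYPE  float
    #define ONE   1.0f
    #define TWO   2.0f
    #define CONST 0.5f

    /* Inlined copy of the two-update kernel so this sketch is self-contained. */
    __global__ void compute_kernel_2(int num_threads, int flops, TYPE *out)
    {
        int tid = threadIdx.x + blockDim.x * blockIdx.x;
        TYPE tmp1 = (ONE * threadIdx.x);
        TYPE tmp2 = (TWO * threadIdx.x);
        if (tid < num_threads) {
            tmp1 = tmp1 + tmp1 * CONST;  /* one mul + one add per statement */
            tmp2 = tmp2 + tmp2 * CONST;
            out[tid] = tmp1 + tmp2;      /* store defeats dead-code elimination */
        }
    }

    int main(void)
    {
        const int num_threads = 1 << 20;
        const int block = 256;
        const int grid = (num_threads + block - 1) / block;

        TYPE *d_out = NULL;
        cudaMalloc((void **)&d_out, num_threads * sizeof(TYPE));

        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);

        cudaEventRecord(start);
        compute_kernel_2<<<grid, block>>>(num_threads, 2, d_out);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);

        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);
        printf("compute_kernel_2: %d threads, %.3f ms\n", num_threads, ms);

        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaFree(d_out);
        return 0;
    }

Timing each kernel in the family over the same grid and subtracting the compute_kernel_1 baseline is one way to separate arithmetic cost from launch and memory overhead; the dependent updates within each accumulator make the measurement latency-bound rather than purely throughput-bound.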
c64cc89c0f37eaa2afd23f4195a39f0104bbad09.cu
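/*
 * Editorial note -- usage sketch, not part of the original file.  The
 * kernels below form a doubling series compute_kernel_{1,2,4,...,2048};
 * compute_kernel_N issues N dependent multiply-add statements per thread.
 * A host driver for one point of the throughput sweep could look like the
 * illustrative (compiled-out) snippet below.  The helper name, launch
 * geometry, and GFLOP/s accounting are assumptions, and TYPE is expected
 * to be a floating-point type defined in common.h.
 */
#if 0
#include <cstdio>
static void time_kernel_64(int num_threads, TYPE* d_out)
{
    int block = 256;
    int grid  = (num_threads + block - 1) / block;
    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);
    cudaEventRecord(t0);
    compute_kernel_64<<<grid, block>>>(num_threads, 64, d_out);
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1);
    /* one multiply-add = 2 FLOPs, 64 of them per thread */
    printf("compute_kernel_64: %.3f ms, %.2f GFLOP/s\n",
           ms, 64.0 * 2.0 * num_threads / (ms * 1e6));
    cudaEventDestroy(t0);
    cudaEventDestroy(t1);
}
#endif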
#include "common.h" __global__ void compute_kernel_1 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp = (ONE * threadIdx.x); if(tid < num_threads) { tmp = tmp + tmp * CONST; out[tid] = tmp; } } __global__ void compute_kernel_2 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_4 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_8 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_16 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_32 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_64 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_128 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_256 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * 
CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 
* CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_512 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; out[tid] = tmp1 + tmp2; } } __global__ void compute_kernel_1024 (int num_threads, int flops, TYPE* out) { int tid; TYPE tmp1, tmp2; tid = threadIdx.x + blockDim.x * blockIdx.x; tmp1 = (ONE * threadIdx.x); tmp2 = (TWO * threadIdx.x); if(tid < num_threads) { tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST;
        /* ... the generated, fully unrolled multiply-add sequence of this
           kernel continues in the same pattern; the remaining pairs are
           elided here for readability (see the loop form used in
           compute_kernel_2048 below) ... */
        out[tid] = tmp1 + tmp2;
    }
}
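/* Editor's note, inferred from the code in this file: each compute_kernel_N
   in this generated family has every thread advance two independent
   dependency chains of multiply-adds (tmp1 and tmp2), so the per-thread
   arithmetic work is fixed by N while the single out[tid] store keeps the
   compiler from eliminating the computation. The flops argument mirrors N
   but is not read by the unrolled bodies. */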
__global__ void compute_kernel_2048 (int num_threads, int flops, TYPE* out)
{
    int tid;
    TYPE tmp1, tmp2;

    tid = threadIdx.x + blockDim.x * blockIdx.x;
    tmp1 = (ONE * threadIdx.x);
    tmp2 = (TWO * threadIdx.x);

    if (tid < num_threads) {
        /* The generated original spells out every multiply-add pair as
           straight-line code. The loop below is a compact equivalent under
           one stated assumption: the kernel name counts total flops, and
           each iteration contributes 4 flops (two multiply-adds of 2 flops
           each), giving 2048 / 4 = 512 iterations. #pragma unroll lets the
           compiler reproduce the original fully unrolled form. */
#pragma unroll
        for (int i = 0; i < 2048 / 4; ++i) {
            tmp1 = tmp1 + tmp1 * CONST;
            tmp2 = tmp2 + tmp2 * CONST;
        }
        out[tid] = tmp1 + tmp2;
    }
}
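/* A minimal launch-and-timing sketch (editor's illustration, not part of the
   original file): how one of these kernels might be driven with CUDA events.
   The helper name run_compute_2048 and the block size are hypothetical;
   TYPE, ONE, TWO, and CONST are assumed to be defined near the top of this
   file. In the HIP variant of this file, the cuda* calls map one-to-one to
   their hip* equivalents. */
static float run_compute_2048(int num_threads)
{
    TYPE* d_out = nullptr;
    cudaMalloc(&d_out, num_threads * sizeof(TYPE));

    const int block = 256;
    const int grid  = (num_threads + block - 1) / block;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    /* The second argument mirrors the kernel name; the unrolled body does
       not read it at runtime. */
    compute_kernel_2048<<<grid, block>>>(num_threads, 2048, d_out);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_out);
    return ms;
}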
/* compute_kernel_4096: same structure as the kernels above, with a
   4096-flop dependent chain per thread. The original body hand-unrolled the
   tmp1/tmp2 pair; assuming the usual counting of one multiply plus one add
   per statement (4 flops per pair), that is 1024 repetitions, which a fully
   unrolled fixed-trip-count loop reproduces without the source-level
   repetition. tmp1 and tmp2 form two independent dependency chains so their
   FMA latencies can overlap. The flops argument is unused (the count is
   fixed at compile time) and kept only so every kernel in the family shares
   one launch signature. */
__global__ void compute_kernel_4096 (int num_threads, int flops, TYPE* out)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    TYPE tmp1 = (ONE * threadIdx.x);
    TYPE tmp2 = (TWO * threadIdx.x);

    if (tid < num_threads) {
#pragma unroll
        for (int i = 0; i < 4096 / 4; ++i) {
            tmp1 = tmp1 + tmp1 * CONST;
            tmp2 = tmp2 + tmp2 * CONST;
        }
        out[tid] = tmp1 + tmp2;
    }
}
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
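        /* Each statement costs one multiply and one add on its own
         * accumulator, so each chain is serially dependent; keeping two
         * accumulators (tmp1, tmp2) gives the scheduler two independent
         * chains to interleave. The sum stored below keeps the whole
         * computation observable so the compiler cannot discard it. */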
        out[tid] = tmp1 + tmp2;
    }
}
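/* The generated kernels above and below all share one shape: a straight-line
 * chain of multiply-add pairs over two accumulators, closed by a store that
 * keeps the chain live. As a compact reference, the hypothetical template
 * below (not part of the generated file) expresses the same pattern with a
 * compile-time trip count; #pragma unroll over a constant bound compiles to
 * the same fully unrolled instruction stream the generator spells out by
 * hand. PAIRS, compute_kernel_generic, and measure_gflops are illustrative
 * names, and the 2-FLOPs-per-statement accounting is an assumption about the
 * generator's convention. */
#include <hip/hip_runtime.h>   /* harmless if already included above */

template <int PAIRS>
__global__ void compute_kernel_generic(int num_threads, TYPE* out)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    TYPE tmp1 = ONE * threadIdx.x;
    TYPE tmp2 = TWO * threadIdx.x;
    if (tid < num_threads) {
#pragma unroll
        for (int i = 0; i < PAIRS; ++i) {
            tmp1 = tmp1 + tmp1 * CONST;  /* 1 mul + 1 add = 2 FLOPs */
            tmp2 = tmp2 + tmp2 * CONST;  /* independent chain -> 2-way ILP */
        }
        out[tid] = tmp1 + tmp2;          /* observable result */
    }
}

/* Host-side sketch of how such a kernel is typically timed to estimate
 * achieved throughput: hip events bracket the launch and GFLOP/s follows
 * from total operations over elapsed seconds. The launch geometry and the
 * 2048-pair instantiation (2048 pairs x 2 statements x 2 FLOPs = 8192 FLOPs
 * per thread) are arbitrary choices for illustration. */
static float measure_gflops(int num_threads, TYPE* d_out)
{
    const int tpb = 256;
    const int blocks = (num_threads + tpb - 1) / tpb;
    const long long flops_per_thread = 8192;

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(compute_kernel_generic<2048>, dim3(blocks), dim3(tpb),
                       0, 0, num_threads, d_out);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);

    double total_flops = (double)num_threads * (double)flops_per_thread;
    return (float)(total_flops / (ms * 1e-3) / 1e9);
}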
/*
 * compute_kernel_8192: same synthetic compute pattern as the kernel above,
 * emitted as straight-line code. By the generator's naming convention the
 * numeric suffix encodes the per-thread operation count of the unrolled
 * sequence, and the `flops` argument, apparently unused inside the body,
 * mirrors that figure for the host side (both inferred from the visible
 * pattern of the generated file).
 */
__global__ void compute_kernel_8192(int num_threads, int flops, TYPE* out)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    TYPE tmp1 = ONE * threadIdx.x;   /* two independent accumulator chains */
    TYPE tmp2 = TWO * threadIdx.x;
    if (tid < num_threads) {
        tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST;
        tmp1 = tmp1 + tmp1 *
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + 
tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = 
tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp1 = tmp1 + tmp1 * 
CONST;
    // The generated source hand-unrolls a long dependent FMA chain here
    // (thousands of identical `x = x + x * CONST` statements: first
    // interleaved updates of tmp1 and tmp2, then a tail that updates tmp2
    // only) to keep the kernel compute-bound. It is rewritten below as
    // compact loops with the same serial dependence. NOTE: the trip counts
    // are approximations of the original unroll factors, not exact counts
    // of the generated statements; adjust N_PAIR_ITERS / N_TAIL_ITERS if
    // the precise amount of arithmetic work matters.
    constexpr int N_PAIR_ITERS = 1024; // interleaved tmp1/tmp2 updates (approx.)
    constexpr int N_TAIL_ITERS = 320;  // trailing tmp2-only updates (approx.)
#pragma unroll
    for (int i = 0; i < N_PAIR_ITERS; ++i) {
        tmp1 = tmp1 + tmp1 * CONST; // serial dependence on tmp1
        tmp2 = tmp2 + tmp2 * CONST; // serial dependence on tmp2
    }
#pragma unroll
    for (int i = 0; i < N_TAIL_ITERS; ++i) {
        tmp2 = tmp2 + tmp2 * CONST; // tail phase: extra work on tmp2 only
    }
    tmp2 // completes with the remaining generated chain that follows
= tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + 
tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = 
tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 
= tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; 
tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * 
CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 
* CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + 
tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 
+ tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = 
tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 = tmp2 + tmp2 * CONST; tmp2 
= tmp2 + tmp2 * CONST;
        /* The same dependent multiply-add statement continues here, written out
           by hand a few thousand more times to form one long FMA dependency
           chain. It is equivalent to the fully unrolled loop below; TMP2_REPS
           is an assumed stand-in for the exact manual repetition count, which
           matters only for the benchmark's FLOP bookkeeping. */
        const int TMP2_REPS = 2048; /* assumed; the original count is not preserved */
        #pragma unroll
        for (int r = 0; r < TMP2_REPS; ++r) {
            tmp2 = tmp2 + tmp2 * CONST;
        }
        out[tid] = tmp1 + tmp2;
    }
}
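The kernel above is a peak-throughput microbenchmark: one long chain of dependent multiply-adds, each of which maps to a single fused multiply-add (2 FLOPs). A minimal host-side harness for timing such a kernel might look as follows. This sketch is not part of the dataset; the kernel name `fma_chain`, the chain length `REPS`, and the launch shape are all hypothetical.

// Hypothetical sketch: timing a dependent-FMA chain kernel with CUDA events.
#include <cstdio>
#include <cuda_runtime.h>

#define REPS 4096  // assumed chain length; 2 FLOPs per iteration (one FMA)

__global__ void fma_chain(float *out)
{
    float t = 1.0f;
    const float CONST = 0.5f;
    #pragma unroll
    for (int i = 0; i < REPS; ++i)
        t = t + t * CONST;  // compiles to a single fused multiply-add
    // Store the result so the compiler cannot eliminate the chain.
    out[blockIdx.x * blockDim.x + threadIdx.x] = t;
}

int main()
{
    const int blocks = 256, threads = 256;
    float *d_out;
    cudaMalloc(&d_out, blocks * threads * sizeof(float));

    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);

    fma_chain<<<blocks, threads>>>(d_out);  // warm-up launch
    cudaEventRecord(t0);
    fma_chain<<<blocks, threads>>>(d_out);  // timed launch
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, t0, t1);
    double flops = 2.0 * REPS * blocks * threads;
    printf("%.3f ms, %.2f GFLOP/s\n", ms, flops / (ms * 1e6));

    cudaFree(d_out);
    return 0;
}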
d86aec07ce0f6c579e6e5f2b0b5f19be33429552.hip
// !!! This is a file automatically generated by hipify!!!
/******************************************************
 * Edgar A. Leon
 * Lawrence Livermore National Laboratory
 ******************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hwloc.h>
#include <sys/wait.h>
#include <unistd.h>
#ifdef HAVE_AMD_GPUS
#include "hip/hip_runtime.h"
#endif

#define MAX_PCI_LEN 20
#define MAX_STR_LEN 512

int obj_attr_snprintf(char *str, size_t size, hwloc_obj_t obj, int verbose)
{
  int nc = 0;

  if (obj->type == HWLOC_OBJ_OS_DEVICE)
    switch (obj->attr->osdev.type) {
    case HWLOC_OBJ_OSDEV_COPROC:
      nc += hwloc_obj_type_snprintf(str+nc, size-nc, obj, 1);
      nc += snprintf(str+nc, size-nc, ": name=%s ", obj->name);
      nc += snprintf(str+nc, size-nc, "subtype=%s ", obj->subtype);
      nc += snprintf(str+nc, size-nc, "GPUModel=%s ",
                     hwloc_obj_get_info_by_name(obj, "GPUModel"));
      nc += snprintf(str+nc, size-nc, " ");
      /* Get obj->infos in one shot */
      nc += hwloc_obj_attr_snprintf(str+nc, size-nc, obj, " ", verbose);
      break;
    default:
      break;
    }

  return nc;
}

void set_vis_devs(char *str)
{
  // Don't invoke any GPU calls before resetting the environment!
  // Otherwise, there's no effect of setting VISIBLE_DEVICES.
  //hipGetDeviceCount(&ndevs);
  //printf("Initial num. devices %d\n", ndevs);

  printf("Resetting environment to devices %s\n", str);
  unsetenv("ROCR_VISIBLE_DEVICES");
  unsetenv("HIP_VISIBLE_DEVICES");
  unsetenv("CUDA_VISIBLE_DEVICES");
#ifdef HAVE_AMD_GPUS
  setenv("ROCR_VISIBLE_DEVICES", str, 1);
#else
  setenv("CUDA_VISIBLE_DEVICES", str, 1);
#endif
}

void print_devices(hwloc_topology_t topo)
{
  char str[MAX_STR_LEN];
  hwloc_obj_t obj = NULL;

  while ( (obj = hwloc_get_next_obj_by_type(topo, HWLOC_OBJ_OS_DEVICE, obj)) != NULL )
    if (obj->attr->osdev.type == HWLOC_OBJ_OSDEV_COPROC) {
      str[0] = '\0';
      obj_attr_snprintf(str, MAX_STR_LEN, obj, 0);
      printf("%s\n", str);
    }
}

int get_list_len(char *lst)
{
  // Copy VISDEVS string since strtok modifies the input string
  // (+1 leaves room for the terminating NUL written by strcpy)
  char tmp[strlen(lst) + 1];
  strcpy(tmp, lst);

  /* Get list size */
  int idevs = 0;
  char *token = strtok(tmp, ",");
  while (token != NULL) {
    idevs++;
    token = strtok(NULL, ",");
  }

  return idevs;
}

void test_wdup(char *visdevs, hwloc_topology_t topo)
{
  set_vis_devs(visdevs);

  hwloc_topology_t topo2;
  printf("Duplicating the topology\n");
  hwloc_topology_dup(&topo2, topo);

  set_vis_devs(visdevs);
  print_devices(topo2);
  hwloc_topology_destroy(topo2);
}

void test_wfork(char *vds)
{
  set_vis_devs(vds);

  pid_t cpid = fork();
  if (cpid == 0) {
    unsetenv("ROCR_VISIBLE_DEVICES");
    unsetenv("HIP_VISIBLE_DEVICES");
    printf("Child:\n");
    set_vis_devs(vds);

    hwloc_topology_t topo;
    hwloc_topology_init(&topo);
    hwloc_topology_set_io_types_filter(topo, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
    hwloc_topology_load(topo);
    print_devices(topo);
    hwloc_topology_destroy(topo);
    exit(0);
  } else if (cpid > 0) {
    printf("Parent: Nothing to do but wait...\n");
    wait(NULL);
  } else {
    printf("fork() failed\n");
  }
}

void test_wnew_topo(char *vds)
{
  set_vis_devs(vds);

  hwloc_topology_t topo;
  hwloc_topology_init(&topo);
  hwloc_topology_set_io_types_filter(topo, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
  hwloc_topology_load(topo);
  print_devices(topo);
  hwloc_topology_destroy(topo);
}

void test_wdev_api(char *vds)
{
  int i, odevs = -1;

  /* Cannot call the device driver before setting VISIBLE DEVICES.
     Otherwise, the devices are set and cannot be changed */
  //hipGetDeviceCount(&odevs);
  //printf("Modified num. devices %d\n", odevs);

  set_vis_devs(vds);

  hipGetDeviceCount(&odevs);
  printf("Modified num. devices %d\n", odevs);

  /* Get device PCI ID */
  char pci[MAX_PCI_LEN];
  for (i = 0; i < odevs; i++) {
    pci[0] = '\0';
    hipDeviceGetPCIBusId(pci, MAX_PCI_LEN, i);
    printf("PCI ID of device %d = %s\n", i, pci);
  }
}

void test_wfork_api(char *vds)
{
  int i, odevs = -1;

  /* Don't call into device functions until after setting visible devices */
  //hipGetDeviceCount(&odevs);
  //printf("Num. devices %d\n", odevs);

  set_vis_devs(vds);
  hipGetDeviceCount(&odevs);
  printf("Num. devices %d\n", odevs);

  pid_t cpid = fork();
  if (cpid == 0) {
    unsetenv("ROCR_VISIBLE_DEVICES");
    unsetenv("HIP_VISIBLE_DEVICES");
    printf("Child:\n");
    set_vis_devs(vds);

    hipGetDeviceCount(&odevs);
    printf("Num. devices %d\n", odevs);

    /* Get device PCI ID */
    char pci[MAX_PCI_LEN];
    for (i = 0; i < odevs; i++) {
      pci[0] = '\0';
      hipDeviceGetPCIBusId(pci, MAX_PCI_LEN, i);
      printf("PCI ID of device %d = %s\n", i, pci);
    }
    exit(0);
  } else if (cpid > 0) {
    printf("Parent: Nothing to do but wait...\n");
    wait(NULL);
  } else {
    printf("fork() failed\n");
  }
}

/* Lessons learned:
   1. Setting VISIBLE DEVICES in the context of hwloc:
      The environment variables must be set before the first time the
      topology is loaded.
   2. Setting VISIBLE DEVICES in the context of device API calls:
      The environment variables must be set before the first invocation
      of a device function.
   3. Using fork does not really allow one to get around the points above.
   4. hwloc loading a topology has the same effect as calling a device
      function, i.e., after this, setting VISIBLE DEVICES is too late.
*/

int main(int argc, char *argv[])
{
  char vds[] = "1";
  //int idevs = get_list_len(vds);

  hwloc_topology_t topo;
  hwloc_topology_init(&topo);

  /* OS devices are filtered by default, enable to see GPUs */
  hwloc_topology_set_type_filter(topo, HWLOC_OBJ_OS_DEVICE,
                                 HWLOC_TYPE_FILTER_KEEP_IMPORTANT);

  /* Include PCI devices to determine whether two GPUs are the
     same device, i.e., opencl1d1 and cuda1 */
  hwloc_topology_set_type_filter(topo, HWLOC_OBJ_PCI_DEVICE,
                                 HWLOC_TYPE_FILTER_KEEP_IMPORTANT);

  /* Setting visible devices must be done before loading the
     topology the first time! */
  set_vis_devs(vds);

  /* If testing whether VISIBLE DEVICES work with the device API
     functions, don't load the topology, because this sets the
     devices and they can't be changed later */
  hwloc_topology_load(topo);
  //print_devices(topo);

#if 1
  test_wnew_topo(vds);
#endif

#if 0
  test_wdup(vds, topo);
#endif

#if 0
  test_wfork(vds);
#endif

#if 0
  test_wdev_api(vds);
#endif

#if 0
  test_wfork_api(vds);
#endif

  hwloc_topology_destroy(topo);
  return 0;
}
d86aec07ce0f6c579e6e5f2b0b5f19be33429552.cu
/******************************************************
 * Edgar A. Leon
 * Lawrence Livermore National Laboratory
 ******************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hwloc.h>
#include <sys/wait.h>
#include <unistd.h>
#ifdef HAVE_AMD_GPUS
#include "hip/hip_runtime.h"
#endif

#define MAX_PCI_LEN 20
#define MAX_STR_LEN 512

int obj_attr_snprintf(char *str, size_t size, hwloc_obj_t obj, int verbose)
{
  int nc = 0;

  if (obj->type == HWLOC_OBJ_OS_DEVICE)
    switch (obj->attr->osdev.type) {
    case HWLOC_OBJ_OSDEV_COPROC:
      nc += hwloc_obj_type_snprintf(str+nc, size-nc, obj, 1);
      nc += snprintf(str+nc, size-nc, ": name=%s ", obj->name);
      nc += snprintf(str+nc, size-nc, "subtype=%s ", obj->subtype);
      nc += snprintf(str+nc, size-nc, "GPUModel=%s ",
                     hwloc_obj_get_info_by_name(obj, "GPUModel"));
      nc += snprintf(str+nc, size-nc, " ");
      /* Get obj->infos in one shot */
      nc += hwloc_obj_attr_snprintf(str+nc, size-nc, obj, " ", verbose);
      break;
    default:
      break;
    }

  return nc;
}

void set_vis_devs(char *str)
{
  // Don't invoke any GPU calls before resetting the environment!
  // Otherwise, there's no effect of setting VISIBLE_DEVICES.
  //cudaGetDeviceCount(&ndevs);
  //printf("Initial num. devices %d\n", ndevs);

  printf("Resetting environment to devices %s\n", str);
  unsetenv("ROCR_VISIBLE_DEVICES");
  unsetenv("HIP_VISIBLE_DEVICES");
  unsetenv("CUDA_VISIBLE_DEVICES");
#ifdef HAVE_AMD_GPUS
  setenv("ROCR_VISIBLE_DEVICES", str, 1);
#else
  setenv("CUDA_VISIBLE_DEVICES", str, 1);
#endif
}

void print_devices(hwloc_topology_t topo)
{
  char str[MAX_STR_LEN];
  hwloc_obj_t obj = NULL;

  while ( (obj = hwloc_get_next_obj_by_type(topo, HWLOC_OBJ_OS_DEVICE, obj)) != NULL )
    if (obj->attr->osdev.type == HWLOC_OBJ_OSDEV_COPROC) {
      str[0] = '\0';
      obj_attr_snprintf(str, MAX_STR_LEN, obj, 0);
      printf("%s\n", str);
    }
}

int get_list_len(char *lst)
{
  // Copy VISDEVS string since strtok modifies the input string
  // (+1 leaves room for the terminating NUL written by strcpy)
  char tmp[strlen(lst) + 1];
  strcpy(tmp, lst);

  /* Get list size */
  int idevs = 0;
  char *token = strtok(tmp, ",");
  while (token != NULL) {
    idevs++;
    token = strtok(NULL, ",");
  }

  return idevs;
}

void test_wdup(char *visdevs, hwloc_topology_t topo)
{
  set_vis_devs(visdevs);

  hwloc_topology_t topo2;
  printf("Duplicating the topology\n");
  hwloc_topology_dup(&topo2, topo);

  set_vis_devs(visdevs);
  print_devices(topo2);
  hwloc_topology_destroy(topo2);
}

void test_wfork(char *vds)
{
  set_vis_devs(vds);

  pid_t cpid = fork();
  if (cpid == 0) {
    unsetenv("ROCR_VISIBLE_DEVICES");
    unsetenv("HIP_VISIBLE_DEVICES");
    printf("Child:\n");
    set_vis_devs(vds);

    hwloc_topology_t topo;
    hwloc_topology_init(&topo);
    hwloc_topology_set_io_types_filter(topo, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
    hwloc_topology_load(topo);
    print_devices(topo);
    hwloc_topology_destroy(topo);
    exit(0);
  } else if (cpid > 0) {
    printf("Parent: Nothing to do but wait...\n");
    wait(NULL);
  } else {
    printf("fork() failed\n");
  }
}

void test_wnew_topo(char *vds)
{
  set_vis_devs(vds);

  hwloc_topology_t topo;
  hwloc_topology_init(&topo);
  hwloc_topology_set_io_types_filter(topo, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
  hwloc_topology_load(topo);
  print_devices(topo);
  hwloc_topology_destroy(topo);
}

void test_wdev_api(char *vds)
{
  int i, odevs = -1;

  /* Cannot call the device driver before setting VISIBLE DEVICES.
     Otherwise, the devices are set and cannot be changed */
  //cudaGetDeviceCount(&odevs);
  //printf("Modified num. devices %d\n", odevs);

  set_vis_devs(vds);

  cudaGetDeviceCount(&odevs);
  printf("Modified num. devices %d\n", odevs);

  /* Get device PCI ID */
  char pci[MAX_PCI_LEN];
  for (i = 0; i < odevs; i++) {
    pci[0] = '\0';
    cudaDeviceGetPCIBusId(pci, MAX_PCI_LEN, i);
    printf("PCI ID of device %d = %s\n", i, pci);
  }
}

void test_wfork_api(char *vds)
{
  int i, odevs = -1;

  /* Don't call into device functions until after setting visible devices */
  //cudaGetDeviceCount(&odevs);
  //printf("Num. devices %d\n", odevs);

  set_vis_devs(vds);
  cudaGetDeviceCount(&odevs);
  printf("Num. devices %d\n", odevs);

  pid_t cpid = fork();
  if (cpid == 0) {
    unsetenv("ROCR_VISIBLE_DEVICES");
    unsetenv("HIP_VISIBLE_DEVICES");
    printf("Child:\n");
    set_vis_devs(vds);

    cudaGetDeviceCount(&odevs);
    printf("Num. devices %d\n", odevs);

    /* Get device PCI ID */
    char pci[MAX_PCI_LEN];
    for (i = 0; i < odevs; i++) {
      pci[0] = '\0';
      cudaDeviceGetPCIBusId(pci, MAX_PCI_LEN, i);
      printf("PCI ID of device %d = %s\n", i, pci);
    }
    exit(0);
  } else if (cpid > 0) {
    printf("Parent: Nothing to do but wait...\n");
    wait(NULL);
  } else {
    printf("fork() failed\n");
  }
}

/* Lessons learned:
   1. Setting VISIBLE DEVICES in the context of hwloc:
      The environment variables must be set before the first time the
      topology is loaded.
   2. Setting VISIBLE DEVICES in the context of device API calls:
      The environment variables must be set before the first invocation
      of a device function.
   3. Using fork does not really allow one to get around the points above.
   4. hwloc loading a topology has the same effect as calling a device
      function, i.e., after this, setting VISIBLE DEVICES is too late.
*/

int main(int argc, char *argv[])
{
  char vds[] = "1";
  //int idevs = get_list_len(vds);

  hwloc_topology_t topo;
  hwloc_topology_init(&topo);

  /* OS devices are filtered by default, enable to see GPUs */
  hwloc_topology_set_type_filter(topo, HWLOC_OBJ_OS_DEVICE,
                                 HWLOC_TYPE_FILTER_KEEP_IMPORTANT);

  /* Include PCI devices to determine whether two GPUs are the
     same device, i.e., opencl1d1 and cuda1 */
  hwloc_topology_set_type_filter(topo, HWLOC_OBJ_PCI_DEVICE,
                                 HWLOC_TYPE_FILTER_KEEP_IMPORTANT);

  /* Setting visible devices must be done before loading the
     topology the first time! */
  set_vis_devs(vds);

  /* If testing whether VISIBLE DEVICES work with the device API
     functions, don't load the topology, because this sets the
     devices and they can't be changed later */
  hwloc_topology_load(topo);
  //print_devices(topo);

#if 1
  test_wnew_topo(vds);
#endif

#if 0
  test_wdup(vds, topo);
#endif

#if 0
  test_wfork(vds);
#endif

#if 0
  test_wdev_api(vds);
#endif

#if 0
  test_wfork_api(vds);
#endif

  hwloc_topology_destroy(topo);
  return 0;
}
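Both variants of the file above encode the same ordering rule, lesson 2 in the closing comment: the VISIBLE_DEVICES variables are read exactly once, when the runtime initializes on the first device call, and are ignored afterwards. A minimal standalone illustration of that rule (hypothetical, not part of the dataset; CUDA flavor):

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

int main(void)
{
    /* Must run before the first CUDA runtime call in this process;
       once the runtime has initialized, changing the variable has
       no effect. */
    setenv("CUDA_VISIBLE_DEVICES", "1", 1);

    int n = -1;
    cudaGetDeviceCount(&n);   /* first runtime call: reads the variable */
    printf("Num. devices %d\n", n);
    return 0;
}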
d25f2fa33e2c00334d11c3af6efb32284823f98e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void reduceNeighboredSmem(int *g_idata, int *g_odata, unsigned int n)
{
    __shared__ int smem[DIM]; // DIM must match the launch block size

    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;

    // boundary check
    if (idx >= n) return;

    smem[tid] = idata[tid];
    __syncthreads();

    // in-place reduction in shared memory
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if ((tid % (2 * stride)) == 0)
        {
            smem[tid] += smem[tid + stride];
        }

        // synchronize within threadblock
        __syncthreads();
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
d25f2fa33e2c00334d11c3af6efb32284823f98e.cu
#include "includes.h" __global__ void reduceNeighboredSmem(int *g_idata, int *g_odata, unsigned int n) { __shared__ int smem[DIM]; // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; smem[tid] = idata[tid]; __syncthreads(); // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { smem[tid] += smem[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; }
9db5eb02ff3899149eecc96216e20751a915aca1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "convertDepthImageToMeter_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_depth_image_meter = NULL; hipMalloc(&d_depth_image_meter, XSIZE*YSIZE*sizeof(float)); const unsigned int *d_depth_image_millimeter = NULL; hipMalloc(&d_depth_image_millimeter, XSIZE*YSIZE*sizeof(unsigned int)); int n_rows = 1; int n_cols = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( convertDepthImageToMeter_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_depth_image_meter,d_depth_image_millimeter,n_rows,n_cols); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( convertDepthImageToMeter_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_depth_image_meter,d_depth_image_millimeter,n_rows,n_cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( convertDepthImageToMeter_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_depth_image_meter,d_depth_image_millimeter,n_rows,n_cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9db5eb02ff3899149eecc96216e20751a915aca1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "convertDepthImageToMeter_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_depth_image_meter = NULL; cudaMalloc(&d_depth_image_meter, XSIZE*YSIZE*sizeof(float)); const unsigned int *d_depth_image_millimeter = NULL; cudaMalloc(&d_depth_image_millimeter, XSIZE*YSIZE*sizeof(unsigned int)); int n_rows = 1; int n_cols = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); convertDepthImageToMeter_kernel<<<gridBlock,threadBlock>>>(d_depth_image_meter,d_depth_image_millimeter,n_rows,n_cols); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { convertDepthImageToMeter_kernel<<<gridBlock,threadBlock>>>(d_depth_image_meter,d_depth_image_millimeter,n_rows,n_cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { convertDepthImageToMeter_kernel<<<gridBlock,threadBlock>>>(d_depth_image_meter,d_depth_image_millimeter,n_rows,n_cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
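// The harness above rounds XSIZE/YSIZE up with while-loops until they divide
// evenly by the block shape. A common alternative (a sketch, not from the
// original files) is ceil-division on the grid plus an in-kernel bounds check,
// which avoids mutating the problem size:
#include <cuda_runtime.h>
static inline dim3 ceil_grid(int xsize, int ysize, int bx, int by)
{
  // ceil(xsize/bx) x ceil(ysize/by) blocks cover the image exactly once
  return dim3((xsize + bx - 1) / bx, (ysize + by - 1) / by);
}
// Usage in place of the rounding loops:
//   dim3 threadBlock(BLOCKX, BLOCKY);
//   dim3 gridBlock = ceil_grid(XSIZE, YSIZE, BLOCKX, BLOCKY);
// with the kernel guarding: if (x >= n_cols || y >= n_rows) return;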
49a71d405a1976e60c58b18da9218004c3a2883b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include<stdint.h> #include<stdlib.h> #include<hip/hip_runtime.h> #define WID 1024 #define HEI 1024 #pragma pack(push,1) typedef struct tagBITMAPFILEHEADER { unsigned short bfType; uint32_t bfSize; unsigned short bfReserved1; unsigned short bfReserved2; uint32_t bf0ffBits; }BITMAPFILEHEADER; #pragma pack(pop) typedef struct tagBITMAPINFOHEADER { uint32_t biSize; int32_t biWidth; int32_t biHeight; unsigned short biPlanes; unsigned short biBitCount; uint32_t biCompression; uint32_t biSizeImage; int32_t biXPelsPerMeter; int32_t biYPelsPerMeter; uint32_t biCirUsed; uint32_t biCirImportant; }BITMAPINFOHEADER; typedef struct tagRGBQUAD { unsigned char rgbBlue; unsigned char rgbGreen; unsigned char rgbRed; unsigned char rgbReserved; }RGBQUAD; typedef struct tagBITMAPINFO { BITMAPINFOHEADER bmiHeader; RGBQUAD bmiColors[1]; }BITMAPINFO; __global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d) { int i,j,k; double kankaku,hatyou,goukei; hatyou=0.633; kankaku=10.5; goukei=2.0*M_PI*kankaku/hatyou; for(i=0;i<HEI;i++){ for(j=0;j<WID;j++){ for(k=0;k<*tensuu_d;k++){ img_buf_d[i*WID+j]=img_buf_d[i*WID+j]+cos(goukei*sqrt((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k])+z_d[k]*z_d[k])); } } } } int main(){ int tensuu; BITMAPFILEHEADER BmpFileHeader; BITMAPINFOHEADER BmpInfoHeader; RGBQUAD RGBQuad[256]; FILE *fp; int i,j; BmpFileHeader.bfType =19778; BmpFileHeader.bfSize =14+40+1024+(WID*HEI); BmpFileHeader.bfReserved1 =0; BmpFileHeader.bfReserved2 =0; BmpFileHeader.bf0ffBits =14+40+1024; BmpInfoHeader.biSize =40; BmpInfoHeader.biWidth =WID; BmpInfoHeader.biHeight =HEI; BmpInfoHeader.biPlanes =1; BmpInfoHeader.biBitCount =8; //256 gray levels BmpInfoHeader.biCompression =0L; BmpInfoHeader.biSizeImage =0L; BmpInfoHeader.biXPelsPerMeter =0L; BmpInfoHeader.biYPelsPerMeter =0L; BmpInfoHeader.biCirUsed =0L; BmpInfoHeader.biCirImportant =0L; for(i=0;i<256;i++){ RGBQuad[i].rgbBlue =i; RGBQuad[i].rgbGreen =i; RGBQuad[i].rgbRed =i; RGBQuad[i].rgbReserved =0; } char filename[20]={}; printf("Enter filename : "); scanf("%s",filename); fp=fopen(filename,"rb"); if(fp==NULL){ printf("File open error\n"); } fread(&tensuu,sizeof(int),1,fp); printf("Number of object points: %d\n",tensuu); int x[tensuu]; int y[tensuu]; double z[tensuu]; int *tensuu_d; hipMalloc((void**)&tensuu_d,sizeof(int)); hipMemcpy(tensuu_d,&tensuu,sizeof(int),hipMemcpyHostToDevice); int *x_d,*y_d; double *z_d; double *img_buf_d; dim3 blocks(1,1,1); dim3 threads(1,1,1); int x_buf,y_buf,z_buf; for(i=0;i<tensuu;i++){ fread(&x_buf,sizeof(int),1,fp); fread(&y_buf,sizeof(int),1,fp); fread(&z_buf,sizeof(int),1,fp); x[i]=x_buf*40+512; y[i]=y_buf*40+512; z[i]=((double)z_buf)*40+100000.0; } fclose(fp); hipMalloc((void**)&x_d,tensuu*sizeof(int)); hipMalloc((void**)&y_d,tensuu*sizeof(int)); hipMalloc((void**)&z_d,tensuu*sizeof(double)); hipMalloc((void**)&img_buf_d,WID*HEI*sizeof(double)); double *img_buf; img_buf=(double *)malloc(sizeof(double)*WID*HEI); for(i=0;i<WID*HEI;i++){ img_buf[i]=0.0; } hipMemcpy(x_d,x,tensuu*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(y_d,y,tensuu*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(z_d,z,tensuu*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( distance_gpu), dim3(blocks),dim3(threads), 0, 0, x_d,y_d,z_d,img_buf_d,tensuu_d); hipMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),hipMemcpyDeviceToHost); double min,max,mid; min=img_buf[0]; max=img_buf[0]; for(i=0;i<HEI;i++){ 
for(j=0;j<WID;j++){ if(min>img_buf[i*WID+j]){ min=img_buf[i*WID+j]; } if(max<img_buf[i*WID+j]){ max=img_buf[i*WID+j]; } } } mid=0.5*(min+max); printf("min = %lf max = %lf mid = %lf\n",min,max,mid); unsigned char *img; img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI); for(i=0;i<WID*HEI;i++){ if(img_buf[i]<mid){ img[i]=0; } if(img_buf[i]>mid){ img[i]=255; } } FILE *fp1; fp1=fopen("cgh_root_gpu.bmp","wb"); if(fp1==NULL){ printf("File open error\n"); } fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1); fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1); fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1); fwrite(img,sizeof(unsigned char),WID*HEI,fp1); free(img); free(img_buf); fclose(fp1); hipFree(tensuu_d); hipFree(x_d); hipFree(y_d); hipFree(z_d); hipFree(img_buf_d); return 0; }
49a71d405a1976e60c58b18da9218004c3a2883b.cu
#include <stdio.h> #include <math.h> #include<stdint.h> #include<stdlib.h> #include<cuda.h> #define WID 1024 #define HEI 1024 #pragma pack(push,1) typedef struct tagBITMAPFILEHEADER { unsigned short bfType; uint32_t bfSize; unsigned short bfReserved1; unsigned short bfReserved2; uint32_t bf0ffBits; }BITMAPFILEHEADER; #pragma pack(pop) typedef struct tagBITMAPINFOHEADER { uint32_t biSize; int32_t biWidth; int32_t biHeight; unsigned short biPlanes; unsigned short biBitCount; uint32_t biCompression; uint32_t biSizeImage; int32_t biXPelsPerMeter; int32_t biYPelsPerMeter; uint32_t biCirUsed; uint32_t biCirImportant; }BITMAPINFOHEADER; typedef struct tagRGBQUAD { unsigned char rgbBlue; unsigned char rgbGreen; unsigned char rgbRed; unsigned char rgbReserved; }RGBQUAD; typedef struct tagBITMAPINFO { BITMAPINFOHEADER bmiHeader; RGBQUAD bmiColors[1]; }BITMAPINFO; __global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d) { int i,j,k; double kankaku,hatyou,goukei; hatyou=0.633; kankaku=10.5; goukei=2.0*M_PI*kankaku/hatyou; for(i=0;i<HEI;i++){ for(j=0;j<WID;j++){ for(k=0;k<*tensuu_d;k++){ img_buf_d[i*WID+j]=img_buf_d[i*WID+j]+cos(goukei*sqrt((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k])+z_d[k]*z_d[k])); } } } } int main(){ int tensuu; BITMAPFILEHEADER BmpFileHeader; BITMAPINFOHEADER BmpInfoHeader; RGBQUAD RGBQuad[256]; FILE *fp; int i,j; BmpFileHeader.bfType =19778; BmpFileHeader.bfSize =14+40+1024+(WID*HEI); BmpFileHeader.bfReserved1 =0; BmpFileHeader.bfReserved2 =0; BmpFileHeader.bf0ffBits =14+40+1024; BmpInfoHeader.biSize =40; BmpInfoHeader.biWidth =WID; BmpInfoHeader.biHeight =HEI; BmpInfoHeader.biPlanes =1; BmpInfoHeader.biBitCount =8; //256 gray levels BmpInfoHeader.biCompression =0L; BmpInfoHeader.biSizeImage =0L; BmpInfoHeader.biXPelsPerMeter =0L; BmpInfoHeader.biYPelsPerMeter =0L; BmpInfoHeader.biCirUsed =0L; BmpInfoHeader.biCirImportant =0L; for(i=0;i<256;i++){ RGBQuad[i].rgbBlue =i; RGBQuad[i].rgbGreen =i; RGBQuad[i].rgbRed =i; RGBQuad[i].rgbReserved =0; } char filename[20]={}; printf("Enter filename : "); scanf("%s",filename); fp=fopen(filename,"rb"); if(fp==NULL){ printf("File open error\n"); } fread(&tensuu,sizeof(int),1,fp); printf("Number of object points: %d\n",tensuu); int x[tensuu]; int y[tensuu]; double z[tensuu]; int *tensuu_d; cudaMalloc((void**)&tensuu_d,sizeof(int)); cudaMemcpy(tensuu_d,&tensuu,sizeof(int),cudaMemcpyHostToDevice); int *x_d,*y_d; double *z_d; double *img_buf_d; dim3 blocks(1,1,1); dim3 threads(1,1,1); int x_buf,y_buf,z_buf; for(i=0;i<tensuu;i++){ fread(&x_buf,sizeof(int),1,fp); fread(&y_buf,sizeof(int),1,fp); fread(&z_buf,sizeof(int),1,fp); x[i]=x_buf*40+512; y[i]=y_buf*40+512; z[i]=((double)z_buf)*40+100000.0; } fclose(fp); cudaMalloc((void**)&x_d,tensuu*sizeof(int)); cudaMalloc((void**)&y_d,tensuu*sizeof(int)); cudaMalloc((void**)&z_d,tensuu*sizeof(double)); cudaMalloc((void**)&img_buf_d,WID*HEI*sizeof(double)); double *img_buf; img_buf=(double *)malloc(sizeof(double)*WID*HEI); for(i=0;i<WID*HEI;i++){ img_buf[i]=0.0; } cudaMemcpy(x_d,x,tensuu*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(y_d,y,tensuu*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(z_d,z,tensuu*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),cudaMemcpyHostToDevice); distance_gpu<<<blocks,threads>>>(x_d,y_d,z_d,img_buf_d,tensuu_d); cudaMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),cudaMemcpyDeviceToHost); double min,max,mid; min=img_buf[0]; max=img_buf[0]; for(i=0;i<HEI;i++){ for(j=0;j<WID;j++){ if(min>img_buf[i*WID+j]){ min=img_buf[i*WID+j]; } 
if(max<img_buf[i*WID+j]){ max=img_buf[i*WID+j]; } } } mid=0.5*(min+max); printf("min = %lf max = %lf mid = %lf\n",min,max,mid); unsigned char *img; img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI); for(i=0;i<WID*HEI;i++){ if(img_buf[i]<mid){ img[i]=0; } if(img_buf[i]>mid){ img[i]=255; } } FILE *fp1; fp1=fopen("cgh_root_gpu.bmp","wb"); if(fp1==NULL){ printf("File open error\n"); } fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1); fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1); fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1); fwrite(img,sizeof(unsigned char),WID*HEI,fp1); free(img); free(img_buf); fclose(fp1); cudaFree(tensuu_d); cudaFree(x_d); cudaFree(y_d); cudaFree(z_d); cudaFree(img_buf_d); return 0; }
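// The distance_gpu kernel in the pair above is launched with blocks(1,1,1)
// and threads(1,1,1), so a single GPU thread loops over all WID*HEI pixels
// serially. A per-pixel parallel sketch of the same computation (assumes the
// WID/HEI macros and includes from the file above; tensuu is passed by value
// here instead of through a device pointer):
__global__ void distance_gpu_parallel(const int *x_d, const int *y_d,
                                      const double *z_d, double *img_buf_d,
                                      int tensuu)
{
  const int j = blockIdx.x * blockDim.x + threadIdx.x;  // column
  const int i = blockIdx.y * blockDim.y + threadIdx.y;  // row
  if (i >= HEI || j >= WID) return;
  const double goukei = 2.0 * M_PI * 10.5 / 0.633;      // 2*pi*kankaku/hatyou
  double acc = 0.0;
  for (int k = 0; k < tensuu; k++) {                    // sum over object points
    const double dx = j - x_d[k], dy = i - y_d[k];
    acc += cos(goukei * sqrt(dx * dx + dy * dy + z_d[k] * z_d[k]));
  }
  img_buf_d[i * WID + j] = acc;
}
// Launch, e.g.: dim3 b(16, 16), g((WID + 15) / 16, (HEI + 15) / 16);
//               distance_gpu_parallel<<<g, b>>>(x_d, y_d, z_d, img_buf_d, tensuu);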
43cf43338f33eaedc076fc56c6e1f635cac721f0.hip
// !!! This is a file automatically generated by hipify!!! /// LSU EE 4702-1 (Fall 2018), GPU Programming // /// Simple CUDA Example, without LSU ECE helper classes. /// References // // :ccpg8: CUDA C Programming Guide Version 8 // https://docs.nvidia.com/cuda/cuda-c-programming-guide #if 0 /// Background /// CUDA // // NVIDIA's system for programming NVIDIA GPUs. // // Intended for non-graphical computation, widely used for // scientific computation. // /// CUDA Components. // // - CUDA C // Language used for writing code that runs on the GPU. // // - CUDA Runtime API // Library used for managing the execution of code on the GPU. // // - CUDA Compiler Toolchain // The "compiler" nvcc, and related tools. // // - CUDA Compatible GPU // Probably just NVIDIA GPUs. /// CUDA C // // Language used for writing code that runs on the GPU. // // A file can contain both CUDA C and C for the host compiler ... // ... that is the case for this file. // // In this file CUDA C is in routine: cuda_thread_start() // // Syntactically similar to C++. // // Major Differences // // Executes as a hierarchy of threads. // // Specialized address spaces. /// CUDA C Runtime API // // Library calls used on CPU side to manage execution on GPU. // // Activities Performed with API // // o Send data from CPU to GPU. // o Start execution of GPU code. // o Send data from GPU to CPU. /// CUDA Address Spaces // /// Global Address Space // // Works like "regular" memory on CPU, but it's usually separated. // Uses the same hardware as OpenGL buffer objects. /// Constant Address Space // // Limited amount of storage, read-only on GPU. // Probably uses the same hardware as OpenGL uniforms. /// The Whole Process // // - Install CUDA toolkit and NVIDIA drivers. // - Write hello.cu. // - Compile with nvcc // - Run. /// Typical Program Structure // // - Prepare input data on CPU. // - Send data from CPU to GPU. // - Launch kernel. // - Send data from GPU to CPU. /// Multithreaded Execution // // :Def: Thread // A path of execution through a program. // Most beginner programs have one thread. // // :Def: Multithreaded Execution // Execution of a program using more than one thread. // On CPUs, program starts with one thread (in main), additional // threads are requested by programmer. #endif #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <hip/hip_runtime.h> /// CUDA API Error-Checking Wrapper /// #define CE(call) \ { \ const hipError_t rv = call; \ if ( rv != hipSuccess ) \ { \ printf("CUDA error %d, %s\n",rv,hipGetErrorString(rv)); \ exit(1); \ } \ } double time_fp() { struct timespec tp; clock_gettime(CLOCK_REALTIME,&tp); return ((double)tp.tv_sec)+((double)tp.tv_nsec) * 0.000000001; } struct App { int num_threads; int array_size; float *v_in; float *m_out; float *d_v_in; float *d_m_out; }; // In host address space. App app; // In device constant address space. __constant__ App d_app; __global__ void cuda_thread_start() { // Compute a unique ID for this thread. const int tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= d_app.num_threads ) return; // Warning: The order in which d_v_in is accessed is inefficient. // See demo-cuda-02-basics for a better ordering. 
// const int elt_per_thread = d_app.array_size / d_app.num_threads; const int start = elt_per_thread * tid; const int stop = start + elt_per_thread; # pragma unroll 1 for ( int h=start; h<stop; h++ ) d_app.d_m_out[h] = d_app.d_v_in[h] + 1; } int main(int argc, char **argv) { const int nt_raw = argc < 2 ? 1 : atoi(argv[1]); app.num_threads = abs(nt_raw); app.array_size = argc < 3 ? 1 << 20 : int( atof(argv[2]) * (1<<20) ); const int array_size_bytes = app.array_size * sizeof(app.v_in[0]); const int out_array_size_bytes = app.array_size * sizeof(app.m_out[0]); // Allocate storage for CPU copy of data. // app.v_in = new float[app.array_size]; app.m_out = new float[app.array_size]; // Allocate storage for GPU copy of data. // CE( hipMalloc( &app.d_v_in, array_size_bytes ) ); CE( hipMalloc( &app.d_m_out, out_array_size_bytes ) ); printf("Preparing for %d threads %d elements.\n", app.num_threads, app.array_size); // Initialize input array. // for ( int i=0; i<app.array_size; i++ ) app.v_in[i] = drand48(); const double time_start = time_fp(); // Copy input array from CPU to GPU. // CE( hipMemcpy ( app.d_v_in, app.v_in, array_size_bytes, hipMemcpyHostToDevice ) ); // Copy App structure to GPU. // CE( hipMemcpyToSymbol ( d_app, &app, sizeof(app), 0, hipMemcpyHostToDevice ) ); const int threads_per_block = 256; const int blocks_per_grid = ( app.num_threads + threads_per_block-1 ) / threads_per_block; /// KERNEL LAUNCH hipLaunchKernelGGL(( cuda_thread_start), dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, ); // --- <-- CUDA C kernel launch syntax -> --- // Copy output array from GPU to CPU. // CE( hipMemcpy ( app.m_out, app.d_m_out, out_array_size_bytes, hipMemcpyDeviceToHost) ); const double data_size = app.array_size * ( sizeof(app.v_in[0]) + sizeof(app.m_out[0]) ); const double fp_op_count = app.array_size * 5; const double elapsed_time = time_fp() - time_start; printf("Elapsed time for %d threads and %d elements is %.3f µs\n", app.num_threads, app.array_size, 1e6 * elapsed_time); printf("Rate %.3f GFLOPS, %.3f GB/s\n", 1e-9 * fp_op_count / elapsed_time, 1e-9 * data_size / elapsed_time); }
43cf43338f33eaedc076fc56c6e1f635cac721f0.cu
/// LSU EE 4702-1 (Fall 2018), GPU Programming // /// Simple CUDA Example, without LSU ECE helper classes. /// References // // :ccpg8: CUDA C Programming Guide Version 8 // https://docs.nvidia.com/cuda/cuda-c-programming-guide #if 0 /// Background /// CUDA // // NVIDIA's system for programming NVIDIA GPUs. // // Intended for non-graphical computation, widely used for // scientific computation. // /// CUDA Components. // // - CUDA C // Language used for writing code that runs on the GPU. // // - CUDA Runtime API // Library used for managing the execution of code on the GPU. // // - CUDA Compiler Toolchain // The "compiler" nvcc, and related tools. // // - CUDA Compatible GPU // Probably just NVIDIA GPUs. /// CUDA C // // Language used for writing code that runs on the GPU. // // A file can contain both CUDA C and C for the host compiler ... // ... that is the case for this file. // // In this file CUDA C is in routine: cuda_thread_start() // // Syntactically similar to C++. // // Major Differences // // Executes as a hierarchy of threads. // // Specialized address spaces. /// CUDA C Runtime API // // Library calls used on CPU side to manage execution on GPU. // // Activities Performed with API // // o Send data from CPU to GPU. // o Start execution of GPU code. // o Send data from GPU to CPU. /// CUDA Address Spaces // /// Global Address Space // // Works like "regular" memory on CPU, but it's usually separated. // Uses the same hardware as OpenGL buffer objects. /// Constant Address Space // // Limited amount of storage, read-only on GPU. // Probably uses the same hardware as OpenGL uniforms. /// The Whole Process // // - Install CUDA toolkit and NVIDIA drivers. // - Write hello.cu. // - Compile with nvcc // - Run. /// Typical Program Structure // // - Prepare input data on CPU. // - Send data from CPU to GPU. // - Launch kernel. // - Send data from GPU to CPU. /// Multithreaded Execution // // :Def: Thread // A path of execution through a program. // Most beginner programs have one thread. // // :Def: Multithreaded Execution // Execution of a program using more than one thread. // On CPUs, program starts with one thread (in main), additional // threads are requested by programmer. #endif #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <cuda_runtime.h> /// CUDA API Error-Checking Wrapper /// #define CE(call) \ { \ const cudaError_t rv = call; \ if ( rv != cudaSuccess ) \ { \ printf("CUDA error %d, %s\n",rv,cudaGetErrorString(rv)); \ exit(1); \ } \ } double time_fp() { struct timespec tp; clock_gettime(CLOCK_REALTIME,&tp); return ((double)tp.tv_sec)+((double)tp.tv_nsec) * 0.000000001; } struct App { int num_threads; int array_size; float *v_in; float *m_out; float *d_v_in; float *d_m_out; }; // In host address space. App app; // In device constant address space. __constant__ App d_app; __global__ void cuda_thread_start() { // Compute a unique ID for this thread. const int tid = threadIdx.x + blockIdx.x * blockDim.x; if ( tid >= d_app.num_threads ) return; // Warning: The order in which d_v_in is accessed is inefficient. // See demo-cuda-02-basics for a better ordering. // const int elt_per_thread = d_app.array_size / d_app.num_threads; const int start = elt_per_thread * tid; const int stop = start + elt_per_thread; # pragma unroll 1 for ( int h=start; h<stop; h++ ) d_app.d_m_out[h] = d_app.d_v_in[h] + 1; } int main(int argc, char **argv) { const int nt_raw = argc < 2 ? 
1 : atoi(argv[1]); app.num_threads = abs(nt_raw); app.array_size = argc < 3 ? 1 << 20 : int( atof(argv[2]) * (1<<20) ); const int array_size_bytes = app.array_size * sizeof(app.v_in[0]); const int out_array_size_bytes = app.array_size * sizeof(app.m_out[0]); // Allocate storage for CPU copy of data. // app.v_in = new float[app.array_size]; app.m_out = new float[app.array_size]; // Allocate storage for GPU copy of data. // CE( cudaMalloc( &app.d_v_in, array_size_bytes ) ); CE( cudaMalloc( &app.d_m_out, out_array_size_bytes ) ); printf("Preparing for %d threads %d elements.\n", app.num_threads, app.array_size); // Initialize input array. // for ( int i=0; i<app.array_size; i++ ) app.v_in[i] = drand48(); const double time_start = time_fp(); // Copy input array from CPU to GPU. // CE( cudaMemcpy ( app.d_v_in, app.v_in, array_size_bytes, cudaMemcpyHostToDevice ) ); // Copy App structure to GPU. // CE( cudaMemcpyToSymbol ( d_app, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) ); const int threads_per_block = 256; const int blocks_per_grid = ( app.num_threads + threads_per_block-1 ) / threads_per_block; /// KERNEL LAUNCH cuda_thread_start<<< blocks_per_grid, threads_per_block >>>(); // --- <-- CUDA C kernel launch syntax -> --- // Copy output array from GPU to CPU. // CE( cudaMemcpy ( app.m_out, app.d_m_out, out_array_size_bytes, cudaMemcpyDeviceToHost) ); const double data_size = app.array_size * ( sizeof(app.v_in[0]) + sizeof(app.m_out[0]) ); const double fp_op_count = app.array_size * 5; const double elapsed_time = time_fp() - time_start; printf("Elapsed time for %d threads and %d elements is %.3f µs\n", app.num_threads, app.array_size, 1e6 * elapsed_time); printf("Rate %.3f GFLOPS, %.3f GB/s\n", 1e-9 * fp_op_count / elapsed_time, 1e-9 * data_size / elapsed_time); }
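// One detail worth noting about the demo pair above: the CE() wrapper checks
// API return codes, but a kernel launch itself returns no status. A sketch of
// the usual pattern for checking the launch with the same macro (a fragment
// that would follow the launch inside main()):
//   cuda_thread_start<<< blocks_per_grid, threads_per_block >>>();
//   CE( cudaGetLastError() );        // catches bad launch configurations
//   CE( cudaDeviceSynchronize() );   // surfaces faults raised during execution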
8eff5f732f82a1bd603442987b673b2d729f879d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <quda_internal.h> #include <llfat_quda.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <read_gauge.h> #include <gauge_quda.h> #include <force_common.h> #if (__CUDA_ARCH__ >= 200) #define SITE_MATRIX_LOAD_TEX 1 #define MULINK_LOAD_TEX 1 #define FATLINK_LOAD_TEX 1 #else #define SITE_MATRIX_LOAD_TEX 0 #define MULINK_LOAD_TEX 1 #define FATLINK_LOAD_TEX 1 #endif #define WRITE_FAT_MATRIX(gauge, dir, idx)do { \ gauge[idx + dir*9*llfat_ga_stride] = FAT0; \ gauge[idx + (dir*9+1) * llfat_ga_stride] = FAT1; \ gauge[idx + (dir*9+2) * llfat_ga_stride] = FAT2; \ gauge[idx + (dir*9+3) * llfat_ga_stride] = FAT3; \ gauge[idx + (dir*9+4) * llfat_ga_stride] = FAT4; \ gauge[idx + (dir*9+5) * llfat_ga_stride] = FAT5; \ gauge[idx + (dir*9+6) * llfat_ga_stride] = FAT6; \ gauge[idx + (dir*9+7) * llfat_ga_stride] = FAT7; \ gauge[idx + (dir*9+8) * llfat_ga_stride] = FAT8;} while(0) #define WRITE_STAPLE_MATRIX(gauge, idx) \ gauge[idx] = STAPLE0; \ gauge[idx + staple_stride] = STAPLE1; \ gauge[idx + 2*staple_stride] = STAPLE2; \ gauge[idx + 3*staple_stride] = STAPLE3; \ gauge[idx + 4*staple_stride] = STAPLE4; \ gauge[idx + 5*staple_stride] = STAPLE5; \ gauge[idx + 6*staple_stride] = STAPLE6; \ gauge[idx + 7*staple_stride] = STAPLE7; \ gauge[idx + 8*staple_stride] = STAPLE8; #define SCALAR_MULT_SU3_MATRIX(a, b, c) \ c##00_re = a*b##00_re; \ c##00_im = a*b##00_im; \ c##01_re = a*b##01_re; \ c##01_im = a*b##01_im; \ c##02_re = a*b##02_re; \ c##02_im = a*b##02_im; \ c##10_re = a*b##10_re; \ c##10_im = a*b##10_im; \ c##11_re = a*b##11_re; \ c##11_im = a*b##11_im; \ c##12_re = a*b##12_re; \ c##12_im = a*b##12_im; \ c##20_re = a*b##20_re; \ c##20_im = a*b##20_im; \ c##21_re = a*b##21_re; \ c##21_im = a*b##21_im; \ c##22_re = a*b##22_re; \ c##22_im = a*b##22_im; \ #define LOAD_MATRIX_18_SINGLE(gauge, dir, idx, var, stride) \ float2 var##0 = gauge[idx + dir*9*stride]; \ float2 var##1 = gauge[idx + dir*9*stride + stride]; \ float2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ float2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ float2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ float2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ float2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ float2 var##7 = gauge[idx + dir*9*stride + 7*stride]; \ float2 var##8 = gauge[idx + dir*9*stride + 8*stride]; #define LOAD_MATRIX_18_SINGLE_TEX(gauge, dir, idx, var, stride) \ float2 var##0 = tex1Dfetch(gauge, idx + dir*9*stride); \ float2 var##1 = tex1Dfetch(gauge, idx + dir*9*stride + stride); \ float2 var##2 = tex1Dfetch(gauge, idx + dir*9*stride + 2*stride); \ float2 var##3 = tex1Dfetch(gauge, idx + dir*9*stride + 3*stride); \ float2 var##4 = tex1Dfetch(gauge, idx + dir*9*stride + 4*stride); \ float2 var##5 = tex1Dfetch(gauge, idx + dir*9*stride + 5*stride); \ float2 var##6 = tex1Dfetch(gauge, idx + dir*9*stride + 6*stride); \ float2 var##7 = tex1Dfetch(gauge, idx + dir*9*stride + 7*stride); \ float2 var##8 = tex1Dfetch(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_18_DOUBLE(gauge, dir, idx, var, stride) \ double2 var##0 = gauge[idx + dir*9*stride]; \ double2 var##1 = gauge[idx + dir*9*stride + stride]; \ double2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ double2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ double2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ double2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ double2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ double2 var##7 = 
gauge[idx + dir*9*stride + 7*stride]; \ double2 var##8 = gauge[idx + dir*9*stride + 8*stride]; #define LOAD_MATRIX_18_DOUBLE_TEX(gauge, dir, idx, var, stride) \ double2 var##0 = fetch_double2(gauge, idx + dir*9*stride); \ double2 var##1 = fetch_double2(gauge, idx + dir*9*stride + stride); \ double2 var##2 = fetch_double2(gauge, idx + dir*9*stride + 2*stride); \ double2 var##3 = fetch_double2(gauge, idx + dir*9*stride + 3*stride); \ double2 var##4 = fetch_double2(gauge, idx + dir*9*stride + 4*stride); \ double2 var##5 = fetch_double2(gauge, idx + dir*9*stride + 5*stride); \ double2 var##6 = fetch_double2(gauge, idx + dir*9*stride + 6*stride); \ double2 var##7 = fetch_double2(gauge, idx + dir*9*stride + 7*stride); \ double2 var##8 = fetch_double2(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_12_SINGLE_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = gauge[idx + dir*6*stride]; \ float2 var##1 = gauge[idx + dir*6*stride + stride]; \ float2 var##2 = gauge[idx + dir*6*stride + 2*stride]; \ float2 var##3 = gauge[idx + dir*6*stride + 3*stride]; \ float2 var##4 = gauge[idx + dir*6*stride + 4*stride]; \ float2 var##5 = gauge[idx + dir*6*stride + 5*stride]; \ float2 var##6, var##7, var##8; #define LOAD_MATRIX_12_SINGLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = tex1Dfetch(gauge, idx + dir*6*stride); \ float2 var##1 = tex1Dfetch(gauge, idx + dir*6*stride + stride); \ float2 var##2 = tex1Dfetch(gauge, idx + dir*6*stride + 2*stride); \ float2 var##3 = tex1Dfetch(gauge, idx + dir*6*stride + 3*stride); \ float2 var##4 = tex1Dfetch(gauge, idx + dir*6*stride + 4*stride); \ float2 var##5 = tex1Dfetch(gauge, idx + dir*6*stride + 5*stride); \ float2 var##6, var##7, var##8; #define LOAD_MATRIX_18_SINGLE_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = gauge[idx + dir*9*stride]; \ float2 var##1 = gauge[idx + dir*9*stride + stride]; \ float2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ float2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ float2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ float2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ float2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ float2 var##7 = gauge[idx + dir*9*stride + 7*stride]; \ float2 var##8 = gauge[idx + dir*9*stride + 8*stride]; #define LOAD_MATRIX_18_SINGLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = tex1Dfetch(gauge, idx + dir*9*stride); \ float2 var##1 = tex1Dfetch(gauge, idx + dir*9*stride + stride); \ float2 var##2 = tex1Dfetch(gauge, idx + dir*9*stride + 2*stride); \ float2 var##3 = tex1Dfetch(gauge, idx + dir*9*stride + 3*stride); \ float2 var##4 = tex1Dfetch(gauge, idx + dir*9*stride + 4*stride); \ float2 var##5 = tex1Dfetch(gauge, idx + dir*9*stride + 5*stride); \ float2 var##6 = tex1Dfetch(gauge, idx + dir*9*stride + 6*stride); \ float2 var##7 = tex1Dfetch(gauge, idx + dir*9*stride + 7*stride); \ float2 var##8 = tex1Dfetch(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_18_DOUBLE_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = gauge[idx + dir*9*stride]; \ double2 var##1 = gauge[idx + dir*9*stride + stride]; \ double2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ double2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ double2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ double2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ double2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ double2 var##7 = gauge[idx + dir*9*stride + 7*stride]; \ double2 var##8 = gauge[idx + dir*9*stride + 8*stride]; #define 
LOAD_MATRIX_18_DOUBLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = fetch_double2(gauge, idx + dir*9*stride); \ double2 var##1 = fetch_double2(gauge, idx + dir*9*stride + stride); \ double2 var##2 = fetch_double2(gauge, idx + dir*9*stride + 2*stride); \ double2 var##3 = fetch_double2(gauge, idx + dir*9*stride + 3*stride); \ double2 var##4 = fetch_double2(gauge, idx + dir*9*stride + 4*stride); \ double2 var##5 = fetch_double2(gauge, idx + dir*9*stride + 5*stride); \ double2 var##6 = fetch_double2(gauge, idx + dir*9*stride + 6*stride); \ double2 var##7 = fetch_double2(gauge, idx + dir*9*stride + 7*stride); \ double2 var##8 = fetch_double2(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_12_DOUBLE_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = gauge[idx + dir*6*stride]; \ double2 var##1 = gauge[idx + dir*6*stride + stride]; \ double2 var##2 = gauge[idx + dir*6*stride + 2*stride]; \ double2 var##3 = gauge[idx + dir*6*stride + 3*stride]; \ double2 var##4 = gauge[idx + dir*6*stride + 4*stride]; \ double2 var##5 = gauge[idx + dir*6*stride + 5*stride]; \ double2 var##6, var##7, var##8; #define LOAD_MATRIX_12_DOUBLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = fetch_double2(gauge, idx + dir*6*stride); \ double2 var##1 = fetch_double2(gauge, idx + dir*6*stride + stride); \ double2 var##2 = fetch_double2(gauge, idx + dir*6*stride + 2*stride); \ double2 var##3 = fetch_double2(gauge, idx + dir*6*stride + 3*stride); \ double2 var##4 = fetch_double2(gauge, idx + dir*6*stride + 4*stride); \ double2 var##5 = fetch_double2(gauge, idx + dir*6*stride + 5*stride); \ double2 var##6, var##7, var##8; #define LLFAT_ADD_SU3_MATRIX(ma, mb, mc) \ mc##00_re = ma##00_re + mb##00_re; \ mc##00_im = ma##00_im + mb##00_im; \ mc##01_re = ma##01_re + mb##01_re; \ mc##01_im = ma##01_im + mb##01_im; \ mc##02_re = ma##02_re + mb##02_re; \ mc##02_im = ma##02_im + mb##02_im; \ mc##10_re = ma##10_re + mb##10_re; \ mc##10_im = ma##10_im + mb##10_im; \ mc##11_re = ma##11_re + mb##11_re; \ mc##11_im = ma##11_im + mb##11_im; \ mc##12_re = ma##12_re + mb##12_re; \ mc##12_im = ma##12_im + mb##12_im; \ mc##20_re = ma##20_re + mb##20_re; \ mc##20_im = ma##20_im + mb##20_im; \ mc##21_re = ma##21_re + mb##21_re; \ mc##21_im = ma##21_im + mb##21_im; \ mc##22_re = ma##22_re + mb##22_re; \ mc##22_im = ma##22_im + mb##22_im; __constant__ int dir1_array[16]; __constant__ int dir2_array[16]; __constant__ int last_proc_in_tdim; __constant__ int first_proc_in_tdim; unsigned long staple_bytes=0; void llfat_init_cuda(QudaGaugeParam* param) { static int llfat_init_cuda_flag = 0; if (llfat_init_cuda_flag){ return; } llfat_init_cuda_flag = 1; init_kernel_cuda(param); int Vh = param->X[0]*param->X[1]*param->X[2]*param->X[3]/2; int site_ga_stride = param->site_ga_pad + Vh; int staple_stride = param->staple_pad + Vh; int llfat_ga_stride = param->llfat_ga_pad + Vh; hipMemcpyToSymbol("site_ga_stride", &site_ga_stride, sizeof(int)); hipMemcpyToSymbol("staple_stride", &staple_stride, sizeof(int)); hipMemcpyToSymbol("llfat_ga_stride", &llfat_ga_stride, sizeof(int)); int dir1[16]; int dir2[16]; for(int nu =0; nu < 4; nu++) for(int mu=0; mu < 4; mu++){ if(nu == mu) continue; int d1, d2; for(d1=0; d1 < 4; d1 ++){ if(d1 != nu && d1 != mu){ break; } } dir1[nu*4+mu] = d1; for(d2=0; d2 < 4; d2 ++){ if(d2 != nu && d2 != mu && d2 != d1){ break; } } dir2[nu*4+mu] = d2; } hipMemcpyToSymbol("dir1_array", &dir1, sizeof(dir1)); hipMemcpyToSymbol("dir2_array", &dir2, sizeof(dir2)); int first_proc_in_tdim = 0; int 
last_proc_in_tdim = 0; if(commCoords(3) == (commDim(3) -1)){ last_proc_in_tdim = 1; } if(commCoords(3) == 0){ first_proc_in_tdim = 1; } hipMemcpyToSymbol("last_proc_in_tdim", &last_proc_in_tdim, sizeof(int)); hipMemcpyToSymbol("first_proc_in_tdim", &first_proc_in_tdim, sizeof(int)); } #define LLFAT_CONCAT(a,b) a##b##Kernel #define LLFAT_KERNEL(a,b) LLFAT_CONCAT(a,b) //precision: 0 is for double, 1 is for single //single precision, common macro #define PRECISION 1 #define Float float #define LOAD_FAT_MATRIX(gauge, dir, idx) LOAD_MATRIX_18_SINGLE(gauge, dir, idx, FAT, llfat_ga_stride) #if (MULINK_LOAD_TEX == 1) #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?muLink1TexSingle:muLink0TexSingle), dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?muLink0TexSingle:muLink1TexSingle), dir, idx, var, staple_stride) #else #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE(mulink_even, dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE(mulink_odd, dir, idx, var, staple_stride) #endif #if (FATLINK_LOAD_TEX == 1) #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?fatGauge1TexSingle:fatGauge0TexSingle), dir, idx, FAT, llfat_ga_stride); #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?fatGauge0TexSingle:fatGauge1TexSingle), dir, idx, FAT, llfat_ga_stride); #else #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE(fatlink_even, dir, idx, FAT, llfat_ga_stride) #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE(fatlink_odd, dir, idx, FAT, llfat_ga_stride) #endif //single precision, 12-reconstruct #define SITELINK0TEX siteLink0TexSingle #define SITELINK1TEX siteLink1TexSingle #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_DECLARE(sitelink_even, dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, idx, var) LOAD_MATRIX_12_SINGLE_DECLARE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) RECONSTRUCT_LINK_12(dir, idx, sign, var); #define FloatN float2 #define FloatM float2 #define RECONSTRUCT 12 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT //single precision, 18-reconstruct #define SITELINK0TEX siteLink0TexSingle_norecon #define SITELINK1TEX siteLink1TexSingle_norecon #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_DECLARE(sitelink_even, dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) 
LOAD_MATRIX_18_SINGLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, idx, var) LOAD_MATRIX_18_SINGLE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) #define FloatN float2 #define FloatM float2 #define RECONSTRUCT 18 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT #undef PRECISION #undef Float #undef LOAD_FAT_MATRIX #undef LOAD_EVEN_MULINK_MATRIX #undef LOAD_ODD_MULINK_MATRIX #undef LOAD_EVEN_FAT_MATRIX #undef LOAD_ODD_FAT_MATRIX //double precision, common macro #define PRECISION 0 #define Float double #define LOAD_FAT_MATRIX(gauge, dir, idx) LOAD_MATRIX_18_DOUBLE(gauge, dir, idx, FAT, llfat_ga_stride) #if (MULINK_LOAD_TEX == 1) #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?muLink1TexDouble:muLink0TexDouble), dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?muLink0TexDouble:muLink1TexDouble), dir, idx, var, staple_stride) #else #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE(mulink_even, dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE(mulink_odd, dir, idx, var, staple_stride) #endif #if (FATLINK_LOAD_TEX == 1) #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?fatGauge1TexDouble:fatGauge0TexDouble), dir, idx, FAT, llfat_ga_stride) #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?fatGauge0TexDouble:fatGauge1TexDouble), dir, idx, FAT, llfat_ga_stride) #else #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE(fatlink_even, dir, idx, FAT, llfat_ga_stride) #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE(fatlink_odd, dir, idx, FAT, llfat_ga_stride) #endif //double precision, 18-reconstruct #define SITELINK0TEX siteLink0TexDouble #define SITELINK1TEX siteLink1TexDouble #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_DECLARE(sitelink_even, dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, idx, var) LOAD_MATRIX_18_DOUBLE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) #define FloatN double2 #define FloatM double2 #define RECONSTRUCT 18 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT #if 1 //double precision, 12-reconstruct #define SITELINK0TEX siteLink0TexDouble #define SITELINK1TEX siteLink1TexDouble #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, 
var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_DECLARE(sitelink_even, dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, idx, var) LOAD_MATRIX_12_DOUBLE_DECLARE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) RECONSTRUCT_LINK_12(dir, idx, sign, var); #define FloatN double2 #define FloatM double2 #define RECONSTRUCT 12 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT #endif #undef PRECISION #undef Float #undef LOAD_FAT_MATRIX #undef LOAD_EVEN_MULINK_MATRIX #undef LOAD_ODD_MULINK_MATRIX #undef LOAD_EVEN_FAT_MATRIX #undef LOAD_ODD_FAT_MATRIX #undef LLFAT_CONCAT #undef LLFAT_KERNEL #define UNBIND_ALL_TEXTURE do{ \ if(prec ==QUDA_DOUBLE_PRECISION){ \ hipUnbindTexture(siteLink0TexDouble); \ hipUnbindTexture(siteLink1TexDouble); \ hipUnbindTexture(fatGauge0TexDouble); \ hipUnbindTexture(fatGauge1TexDouble); \ hipUnbindTexture(muLink0TexDouble); \ hipUnbindTexture(muLink1TexDouble); \ }else{ \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ hipUnbindTexture(siteLink0TexSingle_norecon); \ hipUnbindTexture(siteLink1TexSingle_norecon); \ }else{ \ hipUnbindTexture(siteLink0TexSingle); \ hipUnbindTexture(siteLink1TexSingle); \ } \ hipUnbindTexture(fatGauge0TexSingle); \ hipUnbindTexture(fatGauge1TexSingle); \ hipUnbindTexture(muLink0TexSingle); \ hipUnbindTexture(muLink1TexSingle); \ } \ }while(0) #define UNBIND_SITE_AND_FAT_LINK do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ hipUnbindTexture(siteLink0TexDouble); \ hipUnbindTexture(siteLink1TexDouble); \ hipUnbindTexture(fatGauge0TexDouble); \ hipUnbindTexture(fatGauge1TexDouble); \ }else { \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ hipUnbindTexture(siteLink0TexSingle_norecon); \ hipUnbindTexture(siteLink1TexSingle_norecon); \ }else{ \ hipUnbindTexture(siteLink0TexSingle); \ hipUnbindTexture(siteLink1TexSingle); \ } \ hipUnbindTexture(fatGauge0TexSingle); \ hipUnbindTexture(fatGauge1TexSingle); \ } \ }while(0) #define BIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ hipBindTexture(0, muLink0TexDouble, mulink_even, staple_bytes); \ hipBindTexture(0, muLink1TexDouble, mulink_odd, staple_bytes); \ }else{ \ hipBindTexture(0, muLink0TexSingle, mulink_even, staple_bytes); \ hipBindTexture(0, muLink1TexSingle, mulink_odd, staple_bytes); \ } \ }while(0) #define UNBIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ hipUnbindTexture(muLink0TexSingle); \ hipUnbindTexture(muLink1TexSingle); \ }else{ \ hipUnbindTexture(muLink0TexDouble); \ hipUnbindTexture(muLink1TexDouble); \ } \ }while(0) #define BIND_SITE_AND_FAT_LINK do { \ if(prec == QUDA_DOUBLE_PRECISION){ \ hipBindTexture(0, siteLink0TexDouble, cudaSiteLink.even, cudaSiteLink.bytes); \ hipBindTexture(0, siteLink1TexDouble, cudaSiteLink.odd, cudaSiteLink.bytes); \ hipBindTexture(0, fatGauge0TexDouble, cudaFatLink.even, cudaFatLink.bytes); \ hipBindTexture(0, fatGauge1TexDouble, cudaFatLink.odd, cudaFatLink.bytes); \ }else{ \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ hipBindTexture(0, siteLink0TexSingle_norecon, cudaSiteLink.even, cudaSiteLink.bytes); \ hipBindTexture(0, siteLink1TexSingle_norecon, cudaSiteLink.odd, cudaSiteLink.bytes); \ }else{ \ hipBindTexture(0, 
siteLink0TexSingle, cudaSiteLink.even, cudaSiteLink.bytes); \ hipBindTexture(0, siteLink1TexSingle, cudaSiteLink.odd, cudaSiteLink.bytes); \ } \ hipBindTexture(0, fatGauge0TexSingle, cudaFatLink.even, cudaFatLink.bytes); \ hipBindTexture(0, fatGauge1TexSingle, cudaFatLink.odd, cudaFatLink.bytes); \ } \ }while(0) #define BIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ hipBindTexture(0, muLink0TexDouble, mulink_even, staple_bytes); \ hipBindTexture(0, muLink1TexDouble, mulink_odd, staple_bytes); \ }else{ \ hipBindTexture(0, muLink0TexSingle, mulink_even, staple_bytes); \ hipBindTexture(0, muLink1TexSingle, mulink_odd, staple_bytes); \ } \ }while(0) #define UNBIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ hipUnbindTexture(muLink0TexSingle); \ hipUnbindTexture(muLink1TexSingle); \ }else{ \ hipUnbindTexture(muLink0TexDouble); \ hipUnbindTexture(muLink1TexDouble); \ } \ }while(0) #define BIND_SITE_AND_FAT_LINK_REVERSE do { \ if(prec == QUDA_DOUBLE_PRECISION){ \ hipBindTexture(0, siteLink1TexDouble, cudaSiteLink.even, cudaSiteLink.bytes); \ hipBindTexture(0, siteLink0TexDouble, cudaSiteLink.odd, cudaSiteLink.bytes); \ hipBindTexture(0, fatGauge1TexDouble, cudaFatLink.even, cudaFatLink.bytes); \ hipBindTexture(0, fatGauge0TexDouble, cudaFatLink.odd, cudaFatLink.bytes); \ }else{ \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ hipBindTexture(0, siteLink1TexSingle_norecon, cudaSiteLink.even, cudaSiteLink.bytes); \ hipBindTexture(0, siteLink0TexSingle_norecon, cudaSiteLink.odd, cudaSiteLink.bytes); \ }else{ \ hipBindTexture(0, siteLink1TexSingle, cudaSiteLink.even, cudaSiteLink.bytes); \ hipBindTexture(0, siteLink0TexSingle, cudaSiteLink.odd, cudaSiteLink.bytes); \ } \ hipBindTexture(0, fatGauge1TexSingle, cudaFatLink.even, cudaFatLink.bytes); \ hipBindTexture(0, fatGauge0TexSingle, cudaFatLink.odd, cudaFatLink.bytes); \ } \ }while(0) #define ENUMERATE_FUNCS(mu,nu) switch(mu) { \ case 0: \ switch(nu){ \ case 0: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 1: \ CALL_FUNCTION(0,1); \ break; \ case 2: \ CALL_FUNCTION(0,2); \ break; \ case 3: \ CALL_FUNCTION(0,3); \ break; \ } \ break; \ case 1: \ switch(nu){ \ case 0: \ CALL_FUNCTION(1,0); \ break; \ case 1: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 2: \ CALL_FUNCTION(1,2); \ break; \ case 3: \ CALL_FUNCTION(1,3); \ break; \ } \ break; \ case 2: \ switch(nu){ \ case 0: \ CALL_FUNCTION(2,0); \ break; \ case 1: \ CALL_FUNCTION(2,1); \ break; \ case 2: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 3: \ CALL_FUNCTION(2,3); \ break; \ } \ break; \ case 3: \ switch(nu){ \ case 0: \ CALL_FUNCTION(3,0); \ break; \ case 1: \ CALL_FUNCTION(3,1); \ break; \ case 2: \ CALL_FUNCTION(3,2); \ break; \ case 3: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ } \ break; \ } #define ENUMERATE_FUNCS_SAVE(mu,nu, save_staple) if(save_staple){ \ switch(mu) { \ case 0: \ switch(nu){ \ case 0: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 1: \ CALL_FUNCTION(0,1,1); \ break; \ case 2: \ CALL_FUNCTION(0,2,1); \ break; \ case 3: \ CALL_FUNCTION(0,3,1); \ break; \ } \ break; \ case 1: \ switch(nu){ \ case 0: \ CALL_FUNCTION(1,0,1); \ break; \ case 1: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 2: \ CALL_FUNCTION(1,2,1); \ break; \ case 3: \ CALL_FUNCTION(1,3,1); \ break; \ } \ break; \ case 2: \ switch(nu){ \ case 0: \ CALL_FUNCTION(2,0,1); \ break; \ case 1: \ 
CALL_FUNCTION(2,1,1); \ break; \ case 2: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 3: \ CALL_FUNCTION(2,3,1); \ break; \ } \ break; \ case 3: \ switch(nu){ \ case 0: \ CALL_FUNCTION(3,0,1); \ break; \ case 1: \ CALL_FUNCTION(3,1,1); \ break; \ case 2: \ CALL_FUNCTION(3,2,1); \ break; \ case 3: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ } \ break; \ } \ }else{ \ switch(mu) { \ case 0: \ switch(nu){ \ case 0: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 1: \ CALL_FUNCTION(0,1,0); \ break; \ case 2: \ CALL_FUNCTION(0,2,0); \ break; \ case 3: \ CALL_FUNCTION(0,3,0); \ break; \ } \ break; \ case 1: \ switch(nu){ \ case 0: \ CALL_FUNCTION(1,0,0); \ break; \ case 1: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 2: \ CALL_FUNCTION(1,2,0); \ break; \ case 3: \ CALL_FUNCTION(1,3,0); \ break; \ } \ break; \ case 2: \ switch(nu){ \ case 0: \ CALL_FUNCTION(2,0,0); \ break; \ case 1: \ CALL_FUNCTION(2,1,0); \ break; \ case 2: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 3: \ CALL_FUNCTION(2,3,0); \ break; \ } \ break; \ case 3: \ switch(nu){ \ case 0: \ CALL_FUNCTION(3,0,0); \ break; \ case 1: \ CALL_FUNCTION(3,1,0); \ break; \ case 2: \ CALL_FUNCTION(3,2,0); \ break; \ case 3: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ } \ break; \ } \ } void siteComputeGenStapleParityKernel(void* staple_even, void* staple_odd, void* sitelink_even, void* sitelink_odd, void* fatlink_even, void* fatlink_odd, int mu, int nu, double mycoeff, QudaReconstructType recon, QudaPrecision prec, dim3 halfGridDim, llfat_kernel_param_t kparam, hipStream_t* stream) { //compute even and odd #define CALL_FUNCTION(mu, nu) \ if (prec == QUDA_DOUBLE_PRECISION){ \ if(recon == QUDA_RECONSTRUCT_NO){ \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity18Kernel<mu,nu, 0>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double)mycoeff, kparam); \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity18Kernel<mu,nu, 1>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double)mycoeff, kparam); \ }else{ \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity12Kernel<mu,nu, 0>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double)mycoeff, kparam); \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity12Kernel<mu,nu, 1>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double)mycoeff, kparam); \ } \ }else { \ if(recon == QUDA_RECONSTRUCT_NO){ \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity18Kernel<mu,nu, 0>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float)mycoeff, kparam); \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity18Kernel<mu,nu, 1>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_odd, 
(float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, (float2*)fatlink_even, \ (float)mycoeff, kparam); \ }else{ \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity12Kernel<mu,nu, 0>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float)mycoeff, kparam); \ hipLaunchKernelGGL(( do_siteComputeGenStapleParity12Kernel<mu,nu, 1>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_odd, (float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, (float2*)fatlink_even, \ (float)mycoeff, kparam); \ } \ } dim3 blockDim(BLOCK_DIM , 1, 1); ENUMERATE_FUNCS(mu,nu); #undef CALL_FUNCTION } void computeGenStapleFieldParityKernel(void* staple_even, void* staple_odd, void* sitelink_even, void* sitelink_odd, void* fatlink_even, void* fatlink_odd, void* mulink_even, void* mulink_odd, int mu, int nu, int save_staple, double mycoeff, QudaReconstructType recon, QudaPrecision prec, dim3 halfGridDim, llfat_kernel_param_t kparam, hipStream_t* stream) { #define CALL_FUNCTION(mu, nu, save_staple) \ if (prec == QUDA_DOUBLE_PRECISION){ \ if(recon == QUDA_RECONSTRUCT_NO){ \ hipLaunchKernelGGL(( do_computeGenStapleFieldParity18Kernel<mu,nu, 0, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double2*)mulink_even, (double2*)mulink_odd, \ (double)mycoeff, kparam); \ hipLaunchKernelGGL(( do_computeGenStapleFieldParity18Kernel<mu,nu, 1, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double2*)mulink_odd, (double2*)mulink_even, \ (double)mycoeff, kparam); \ }else{ \ hipLaunchKernelGGL(( do_computeGenStapleFieldParity12Kernel<mu,nu, 0, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double2*)mulink_even, (double2*)mulink_odd, \ (double)mycoeff, kparam); \ hipLaunchKernelGGL(( do_computeGenStapleFieldParity12Kernel<mu,nu, 1, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double2*)mulink_odd, (double2*)mulink_even, \ (double)mycoeff, kparam); \ } \ }else{ \ if(recon == QUDA_RECONSTRUCT_NO){ \ hipLaunchKernelGGL(( do_computeGenStapleFieldParity18Kernel<mu,nu, 0, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float2*)mulink_even, (float2*)mulink_odd, \ (float)mycoeff, kparam); \ hipLaunchKernelGGL(( do_computeGenStapleFieldParity18Kernel<mu,nu, 1, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_odd, (float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, (float2*)fatlink_even, \ (float2*)mulink_odd, (float2*)mulink_even, \ (float)mycoeff, kparam); \ }else{ \ hipLaunchKernelGGL(( 
do_computeGenStapleFieldParity12Kernel<mu,nu, 0, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float2*)mulink_even, (float2*)mulink_odd, \ (float)mycoeff, kparam); \ hipLaunchKernelGGL(( do_computeGenStapleFieldParity12Kernel<mu,nu, 1, save_staple>) \ , dim3(halfGridDim), dim3(blockDim), 0, *stream, (float2*)staple_odd, (float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, (float2*)fatlink_even, \ (float2*)mulink_odd, (float2*)mulink_even, \ (float)mycoeff, kparam); \ } \ } BIND_MU_LINK(); dim3 blockDim(BLOCK_DIM , 1, 1); ENUMERATE_FUNCS_SAVE(mu,nu,save_staple); UNBIND_MU_LINK(); #undef CALL_FUNCTION } void llfatOneLinkKernel(FullGauge cudaFatLink, FullGauge cudaSiteLink, FullStaple cudaStaple, FullStaple cudaStaple1, QudaGaugeParam* param, double* act_path_coeff) { QudaPrecision prec = cudaSiteLink.precision; QudaReconstructType recon = cudaSiteLink.reconstruct; BIND_SITE_AND_FAT_LINK; int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; dim3 gridDim(volume/BLOCK_DIM,1,1); dim3 blockDim(BLOCK_DIM , 1, 1); staple_bytes = cudaStaple.bytes; if(prec == QUDA_DOUBLE_PRECISION){ if(recon == QUDA_RECONSTRUCT_NO){ hipLaunchKernelGGL(( llfatOneLink18Kernel), dim3(gridDim), dim3(blockDim), 0, 0, (double2*)cudaSiteLink.even, (double2*)cudaSiteLink.odd, (double2*)cudaFatLink.even, (double2*)cudaFatLink.odd, (double)act_path_coeff[0], (double)act_path_coeff[5]); }else{ hipLaunchKernelGGL(( llfatOneLink12Kernel), dim3(gridDim), dim3(blockDim), 0, 0, (double2*)cudaSiteLink.even, (double2*)cudaSiteLink.odd, (double2*)cudaFatLink.even, (double2*)cudaFatLink.odd, (double)act_path_coeff[0], (double)act_path_coeff[5]); } }else{ //single precision if(recon == QUDA_RECONSTRUCT_NO){ hipLaunchKernelGGL(( llfatOneLink18Kernel), dim3(gridDim), dim3(blockDim), 0, 0, (float2*)cudaSiteLink.even, (float2*)cudaSiteLink.odd, (float2*)cudaFatLink.even, (float2*)cudaFatLink.odd, (float)act_path_coeff[0], (float)act_path_coeff[5]); }else{ hipLaunchKernelGGL(( llfatOneLink12Kernel), dim3(gridDim), dim3(blockDim), 0, 0, (float2*)cudaSiteLink.even, (float2*)cudaSiteLink.odd, (float2*)cudaFatLink.even, (float2*)cudaFatLink.odd, (float)act_path_coeff[0], (float)act_path_coeff[5]); } } }
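// Note on the .hip file above: in both definitions of UNBIND_MU_LINK(), the
// QUDA_DOUBLE_PRECISION branch unbinds the single-precision textures and the
// else branch the double-precision ones. A corrected sketch with the same
// texture names:
#undef UNBIND_MU_LINK
#define UNBIND_MU_LINK() do{ \
    if(prec == QUDA_DOUBLE_PRECISION){ \
      hipUnbindTexture(muLink0TexDouble); \
      hipUnbindTexture(muLink1TexDouble); \
    }else{ \
      hipUnbindTexture(muLink0TexSingle); \
      hipUnbindTexture(muLink1TexSingle); \
    } \
  }while(0)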
8eff5f732f82a1bd603442987b673b2d729f879d.cu
#include <stdio.h> #include <quda_internal.h> #include <llfat_quda.h> #include <cuda_runtime.h> #include <cuda.h> #include <read_gauge.h> #include <gauge_quda.h> #include <force_common.h> #if (__CUDA_ARCH__ >= 200) #define SITE_MATRIX_LOAD_TEX 1 #define MULINK_LOAD_TEX 1 #define FATLINK_LOAD_TEX 1 #else #define SITE_MATRIX_LOAD_TEX 0 #define MULINK_LOAD_TEX 1 #define FATLINK_LOAD_TEX 1 #endif #define WRITE_FAT_MATRIX(gauge, dir, idx)do { \ gauge[idx + dir*9*llfat_ga_stride] = FAT0; \ gauge[idx + (dir*9+1) * llfat_ga_stride] = FAT1; \ gauge[idx + (dir*9+2) * llfat_ga_stride] = FAT2; \ gauge[idx + (dir*9+3) * llfat_ga_stride] = FAT3; \ gauge[idx + (dir*9+4) * llfat_ga_stride] = FAT4; \ gauge[idx + (dir*9+5) * llfat_ga_stride] = FAT5; \ gauge[idx + (dir*9+6) * llfat_ga_stride] = FAT6; \ gauge[idx + (dir*9+7) * llfat_ga_stride] = FAT7; \ gauge[idx + (dir*9+8) * llfat_ga_stride] = FAT8;} while(0) #define WRITE_STAPLE_MATRIX(gauge, idx) \ gauge[idx] = STAPLE0; \ gauge[idx + staple_stride] = STAPLE1; \ gauge[idx + 2*staple_stride] = STAPLE2; \ gauge[idx + 3*staple_stride] = STAPLE3; \ gauge[idx + 4*staple_stride] = STAPLE4; \ gauge[idx + 5*staple_stride] = STAPLE5; \ gauge[idx + 6*staple_stride] = STAPLE6; \ gauge[idx + 7*staple_stride] = STAPLE7; \ gauge[idx + 8*staple_stride] = STAPLE8; #define SCALAR_MULT_SU3_MATRIX(a, b, c) \ c##00_re = a*b##00_re; \ c##00_im = a*b##00_im; \ c##01_re = a*b##01_re; \ c##01_im = a*b##01_im; \ c##02_re = a*b##02_re; \ c##02_im = a*b##02_im; \ c##10_re = a*b##10_re; \ c##10_im = a*b##10_im; \ c##11_re = a*b##11_re; \ c##11_im = a*b##11_im; \ c##12_re = a*b##12_re; \ c##12_im = a*b##12_im; \ c##20_re = a*b##20_re; \ c##20_im = a*b##20_im; \ c##21_re = a*b##21_re; \ c##21_im = a*b##21_im; \ c##22_re = a*b##22_re; \ c##22_im = a*b##22_im; \ #define LOAD_MATRIX_18_SINGLE(gauge, dir, idx, var, stride) \ float2 var##0 = gauge[idx + dir*9*stride]; \ float2 var##1 = gauge[idx + dir*9*stride + stride]; \ float2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ float2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ float2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ float2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ float2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ float2 var##7 = gauge[idx + dir*9*stride + 7*stride]; \ float2 var##8 = gauge[idx + dir*9*stride + 8*stride]; #define LOAD_MATRIX_18_SINGLE_TEX(gauge, dir, idx, var, stride) \ float2 var##0 = tex1Dfetch(gauge, idx + dir*9*stride); \ float2 var##1 = tex1Dfetch(gauge, idx + dir*9*stride + stride); \ float2 var##2 = tex1Dfetch(gauge, idx + dir*9*stride + 2*stride); \ float2 var##3 = tex1Dfetch(gauge, idx + dir*9*stride + 3*stride); \ float2 var##4 = tex1Dfetch(gauge, idx + dir*9*stride + 4*stride); \ float2 var##5 = tex1Dfetch(gauge, idx + dir*9*stride + 5*stride); \ float2 var##6 = tex1Dfetch(gauge, idx + dir*9*stride + 6*stride); \ float2 var##7 = tex1Dfetch(gauge, idx + dir*9*stride + 7*stride); \ float2 var##8 = tex1Dfetch(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_18_DOUBLE(gauge, dir, idx, var, stride) \ double2 var##0 = gauge[idx + dir*9*stride]; \ double2 var##1 = gauge[idx + dir*9*stride + stride]; \ double2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ double2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ double2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ double2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ double2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ double2 var##7 = gauge[idx + dir*9*stride + 7*stride]; \ double2 var##8 = gauge[idx + 
dir*9*stride + 8*stride]; #define LOAD_MATRIX_18_DOUBLE_TEX(gauge, dir, idx, var, stride) \ double2 var##0 = fetch_double2(gauge, idx + dir*9*stride); \ double2 var##1 = fetch_double2(gauge, idx + dir*9*stride + stride); \ double2 var##2 = fetch_double2(gauge, idx + dir*9*stride + 2*stride); \ double2 var##3 = fetch_double2(gauge, idx + dir*9*stride + 3*stride); \ double2 var##4 = fetch_double2(gauge, idx + dir*9*stride + 4*stride); \ double2 var##5 = fetch_double2(gauge, idx + dir*9*stride + 5*stride); \ double2 var##6 = fetch_double2(gauge, idx + dir*9*stride + 6*stride); \ double2 var##7 = fetch_double2(gauge, idx + dir*9*stride + 7*stride); \ double2 var##8 = fetch_double2(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_12_SINGLE_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = gauge[idx + dir*6*stride]; \ float2 var##1 = gauge[idx + dir*6*stride + stride]; \ float2 var##2 = gauge[idx + dir*6*stride + 2*stride]; \ float2 var##3 = gauge[idx + dir*6*stride + 3*stride]; \ float2 var##4 = gauge[idx + dir*6*stride + 4*stride]; \ float2 var##5 = gauge[idx + dir*6*stride + 5*stride]; \ float2 var##6, var##7, var##8; #define LOAD_MATRIX_12_SINGLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = tex1Dfetch(gauge, idx + dir*6*stride); \ float2 var##1 = tex1Dfetch(gauge, idx + dir*6*stride + stride); \ float2 var##2 = tex1Dfetch(gauge, idx + dir*6*stride + 2*stride); \ float2 var##3 = tex1Dfetch(gauge, idx + dir*6*stride + 3*stride); \ float2 var##4 = tex1Dfetch(gauge, idx + dir*6*stride + 4*stride); \ float2 var##5 = tex1Dfetch(gauge, idx + dir*6*stride + 5*stride); \ float2 var##6, var##7, var##8; #define LOAD_MATRIX_18_SINGLE_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = gauge[idx + dir*9*stride]; \ float2 var##1 = gauge[idx + dir*9*stride + stride]; \ float2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ float2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ float2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ float2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ float2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ float2 var##7 = gauge[idx + dir*9*stride + 7*stride]; \ float2 var##8 = gauge[idx + dir*9*stride + 8*stride]; #define LOAD_MATRIX_18_SINGLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ float2 var##0 = tex1Dfetch(gauge, idx + dir*9*stride); \ float2 var##1 = tex1Dfetch(gauge, idx + dir*9*stride + stride); \ float2 var##2 = tex1Dfetch(gauge, idx + dir*9*stride + 2*stride); \ float2 var##3 = tex1Dfetch(gauge, idx + dir*9*stride + 3*stride); \ float2 var##4 = tex1Dfetch(gauge, idx + dir*9*stride + 4*stride); \ float2 var##5 = tex1Dfetch(gauge, idx + dir*9*stride + 5*stride); \ float2 var##6 = tex1Dfetch(gauge, idx + dir*9*stride + 6*stride); \ float2 var##7 = tex1Dfetch(gauge, idx + dir*9*stride + 7*stride); \ float2 var##8 = tex1Dfetch(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_18_DOUBLE_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = gauge[idx + dir*9*stride]; \ double2 var##1 = gauge[idx + dir*9*stride + stride]; \ double2 var##2 = gauge[idx + dir*9*stride + 2*stride]; \ double2 var##3 = gauge[idx + dir*9*stride + 3*stride]; \ double2 var##4 = gauge[idx + dir*9*stride + 4*stride]; \ double2 var##5 = gauge[idx + dir*9*stride + 5*stride]; \ double2 var##6 = gauge[idx + dir*9*stride + 6*stride]; \ double2 var##7 = gauge[idx + dir*9*stride + 7*stride]; \ double2 var##8 = gauge[idx + dir*9*stride + 8*stride]; #define LOAD_MATRIX_18_DOUBLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = 
fetch_double2(gauge, idx + dir*9*stride); \ double2 var##1 = fetch_double2(gauge, idx + dir*9*stride + stride); \ double2 var##2 = fetch_double2(gauge, idx + dir*9*stride + 2*stride); \ double2 var##3 = fetch_double2(gauge, idx + dir*9*stride + 3*stride); \ double2 var##4 = fetch_double2(gauge, idx + dir*9*stride + 4*stride); \ double2 var##5 = fetch_double2(gauge, idx + dir*9*stride + 5*stride); \ double2 var##6 = fetch_double2(gauge, idx + dir*9*stride + 6*stride); \ double2 var##7 = fetch_double2(gauge, idx + dir*9*stride + 7*stride); \ double2 var##8 = fetch_double2(gauge, idx + dir*9*stride + 8*stride); #define LOAD_MATRIX_12_DOUBLE_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = gauge[idx + dir*6*stride]; \ double2 var##1 = gauge[idx + dir*6*stride + stride]; \ double2 var##2 = gauge[idx + dir*6*stride + 2*stride]; \ double2 var##3 = gauge[idx + dir*6*stride + 3*stride]; \ double2 var##4 = gauge[idx + dir*6*stride + 4*stride]; \ double2 var##5 = gauge[idx + dir*6*stride + 5*stride]; \ double2 var##6, var##7, var##8; #define LOAD_MATRIX_12_DOUBLE_TEX_DECLARE(gauge, dir, idx, var, stride) \ double2 var##0 = fetch_double2(gauge, idx + dir*6*stride); \ double2 var##1 = fetch_double2(gauge, idx + dir*6*stride + stride); \ double2 var##2 = fetch_double2(gauge, idx + dir*6*stride + 2*stride); \ double2 var##3 = fetch_double2(gauge, idx + dir*6*stride + 3*stride); \ double2 var##4 = fetch_double2(gauge, idx + dir*6*stride + 4*stride); \ double2 var##5 = fetch_double2(gauge, idx + dir*6*stride + 5*stride); \ double2 var##6, var##7, var##8; #define LLFAT_ADD_SU3_MATRIX(ma, mb, mc) \ mc##00_re = ma##00_re + mb##00_re; \ mc##00_im = ma##00_im + mb##00_im; \ mc##01_re = ma##01_re + mb##01_re; \ mc##01_im = ma##01_im + mb##01_im; \ mc##02_re = ma##02_re + mb##02_re; \ mc##02_im = ma##02_im + mb##02_im; \ mc##10_re = ma##10_re + mb##10_re; \ mc##10_im = ma##10_im + mb##10_im; \ mc##11_re = ma##11_re + mb##11_re; \ mc##11_im = ma##11_im + mb##11_im; \ mc##12_re = ma##12_re + mb##12_re; \ mc##12_im = ma##12_im + mb##12_im; \ mc##20_re = ma##20_re + mb##20_re; \ mc##20_im = ma##20_im + mb##20_im; \ mc##21_re = ma##21_re + mb##21_re; \ mc##21_im = ma##21_im + mb##21_im; \ mc##22_re = ma##22_re + mb##22_re; \ mc##22_im = ma##22_im + mb##22_im; __constant__ int dir1_array[16]; __constant__ int dir2_array[16]; __constant__ int last_proc_in_tdim; __constant__ int first_proc_in_tdim; unsigned long staple_bytes=0; void llfat_init_cuda(QudaGaugeParam* param) { static int llfat_init_cuda_flag = 0; if (llfat_init_cuda_flag){ return; } llfat_init_cuda_flag = 1; init_kernel_cuda(param); int Vh = param->X[0]*param->X[1]*param->X[2]*param->X[3]/2; int site_ga_stride = param->site_ga_pad + Vh; int staple_stride = param->staple_pad + Vh; int llfat_ga_stride = param->llfat_ga_pad + Vh; cudaMemcpyToSymbol("site_ga_stride", &site_ga_stride, sizeof(int)); cudaMemcpyToSymbol("staple_stride", &staple_stride, sizeof(int)); cudaMemcpyToSymbol("llfat_ga_stride", &llfat_ga_stride, sizeof(int)); int dir1[16]; int dir2[16]; for(int nu =0; nu < 4; nu++) for(int mu=0; mu < 4; mu++){ if(nu == mu) continue; int d1, d2; for(d1=0; d1 < 4; d1 ++){ if(d1 != nu && d1 != mu){ break; } } dir1[nu*4+mu] = d1; for(d2=0; d2 < 4; d2 ++){ if(d2 != nu && d2 != mu && d2 != d1){ break; } } dir2[nu*4+mu] = d2; } cudaMemcpyToSymbol("dir1_array", &dir1, sizeof(dir1)); cudaMemcpyToSymbol("dir2_array", &dir2, sizeof(dir2)); int first_proc_in_tdim = 0; int last_proc_in_tdim = 0; if(commCoords(3) == (commDim(3) -1)){ last_proc_in_tdim = 1; } 
if(commCoords(3) == 0){ first_proc_in_tdim = 1; } cudaMemcpyToSymbol("last_proc_in_tdim", &last_proc_in_tdim, sizeof(int)); cudaMemcpyToSymbol("first_proc_in_tdim", &first_proc_in_tdim, sizeof(int)); } #define LLFAT_CONCAT(a,b) a##b##Kernel #define LLFAT_KERNEL(a,b) LLFAT_CONCAT(a,b) //precision: 0 is for double, 1 is for single //single precision, common macro #define PRECISION 1 #define Float float #define LOAD_FAT_MATRIX(gauge, dir, idx) LOAD_MATRIX_18_SINGLE(gauge, dir, idx, FAT, llfat_ga_stride) #if (MULINK_LOAD_TEX == 1) #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?muLink1TexSingle:muLink0TexSingle), dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?muLink0TexSingle:muLink1TexSingle), dir, idx, var, staple_stride) #else #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE(mulink_even, dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE(mulink_odd, dir, idx, var, staple_stride) #endif #if (FATLINK_LOAD_TEX == 1) #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?fatGauge1TexSingle:fatGauge0TexSingle), dir, idx, FAT, llfat_ga_stride); #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE_TEX((odd_bit?fatGauge0TexSingle:fatGauge1TexSingle), dir, idx, FAT, llfat_ga_stride); #else #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE(fatlink_even, dir, idx, FAT, llfat_ga_stride) #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_SINGLE(fatlink_odd, dir, idx, FAT, llfat_ga_stride) #endif //single precision, 12-reconstruct #define SITELINK0TEX siteLink0TexSingle #define SITELINK1TEX siteLink1TexSingle #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_DECLARE(sitelink_even, dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, idx, var) LOAD_MATRIX_12_SINGLE_DECLARE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) RECONSTRUCT_LINK_12(dir, idx, sign, var); #define FloatN float2 #define FloatM float2 #define RECONSTRUCT 12 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT //single precision, 18-reconstruct #define SITELINK0TEX siteLink0TexSingle_norecon #define SITELINK1TEX siteLink1TexSingle_norecon #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_DECLARE(sitelink_even, dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, 
idx, var) LOAD_MATRIX_18_SINGLE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) #define FloatN float2 #define FloatM float2 #define RECONSTRUCT 18 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT #undef PRECISION #undef Float #undef LOAD_FAT_MATRIX #undef LOAD_EVEN_MULINK_MATRIX #undef LOAD_ODD_MULINK_MATRIX #undef LOAD_EVEN_FAT_MATRIX #undef LOAD_ODD_FAT_MATRIX //double precision, common macro #define PRECISION 0 #define Float double #define LOAD_FAT_MATRIX(gauge, dir, idx) LOAD_MATRIX_18_DOUBLE(gauge, dir, idx, FAT, llfat_ga_stride) #if (MULINK_LOAD_TEX == 1) #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?muLink1TexDouble:muLink0TexDouble), dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?muLink0TexDouble:muLink1TexDouble), dir, idx, var, staple_stride) #else #define LOAD_EVEN_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE(mulink_even, dir, idx, var, staple_stride) #define LOAD_ODD_MULINK_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE(mulink_odd, dir, idx, var, staple_stride) #endif #if (FATLINK_LOAD_TEX == 1) #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?fatGauge1TexDouble:fatGauge0TexDouble), dir, idx, FAT, llfat_ga_stride) #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE_TEX((odd_bit?fatGauge0TexDouble:fatGauge1TexDouble), dir, idx, FAT, llfat_ga_stride) #else #define LOAD_EVEN_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE(fatlink_even, dir, idx, FAT, llfat_ga_stride) #define LOAD_ODD_FAT_MATRIX(dir, idx) LOAD_MATRIX_18_DOUBLE(fatlink_odd, dir, idx, FAT, llfat_ga_stride) #endif //double precision, 18-reconstruct #define SITELINK0TEX siteLink0TexDouble #define SITELINK1TEX siteLink1TexDouble #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_DECLARE(sitelink_even, dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, idx, var) LOAD_MATRIX_18_DOUBLE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) #define FloatN double2 #define FloatM double2 #define RECONSTRUCT 18 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT #if 1 //double precision, 12-reconstruct #define SITELINK0TEX siteLink0TexDouble #define SITELINK1TEX siteLink1TexDouble #if (SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX_DECLARE((odd_bit?SITELINK1TEX:SITELINK0TEX), dir, idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX_DECLARE((odd_bit?SITELINK0TEX:SITELINK1TEX), dir, idx, var, site_ga_stride) #else #define LOAD_EVEN_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_DECLARE(sitelink_even, dir, 
idx, var, site_ga_stride) #define LOAD_ODD_SITE_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_DECLARE(sitelink_odd, dir, idx, var, site_ga_stride) #endif #define LOAD_SITE_MATRIX(sitelink, dir, idx, var) LOAD_MATRIX_12_DOUBLE_DECLARE(sitelink, dir, idx, var, site_ga_stride) #define RECONSTRUCT_SITE_LINK(dir, idx, sign, var) RECONSTRUCT_LINK_12(dir, idx, sign, var); #define FloatN double2 #define FloatM double2 #define RECONSTRUCT 12 #include "llfat_core.h" #undef SITELINK0TEX #undef SITELINK1TEX #undef LOAD_EVEN_SITE_MATRIX #undef LOAD_ODD_SITE_MATRIX #undef LOAD_SITE_MATRIX #undef RECONSTRUCT_SITE_LINK #undef FloatN #undef FloatM #undef RECONSTRUCT #endif #undef PRECISION #undef Float #undef LOAD_FAT_MATRIX #undef LOAD_EVEN_MULINK_MATRIX #undef LOAD_ODD_MULINK_MATRIX #undef LOAD_EVEN_FAT_MATRIX #undef LOAD_ODD_FAT_MATRIX #undef LLFAT_CONCAT #undef LLFAT_KERNEL #define UNBIND_ALL_TEXTURE do{ \ if(prec ==QUDA_DOUBLE_PRECISION){ \ cudaUnbindTexture(siteLink0TexDouble); \ cudaUnbindTexture(siteLink1TexDouble); \ cudaUnbindTexture(fatGauge0TexDouble); \ cudaUnbindTexture(fatGauge1TexDouble); \ cudaUnbindTexture(muLink0TexDouble); \ cudaUnbindTexture(muLink1TexDouble); \ }else{ \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ cudaUnbindTexture(siteLink0TexSingle_norecon); \ cudaUnbindTexture(siteLink1TexSingle_norecon); \ }else{ \ cudaUnbindTexture(siteLink0TexSingle); \ cudaUnbindTexture(siteLink1TexSingle); \ } \ cudaUnbindTexture(fatGauge0TexSingle); \ cudaUnbindTexture(fatGauge1TexSingle); \ cudaUnbindTexture(muLink0TexSingle); \ cudaUnbindTexture(muLink1TexSingle); \ } \ }while(0) #define UNBIND_SITE_AND_FAT_LINK do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ cudaUnbindTexture(siteLink0TexDouble); \ cudaUnbindTexture(siteLink1TexDouble); \ cudaUnbindTexture(fatGauge0TexDouble); \ cudaUnbindTexture(fatGauge1TexDouble); \ }else { \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ cudaUnbindTexture(siteLink0TexSingle_norecon); \ cudaUnbindTexture(siteLink1TexSingle_norecon); \ }else{ \ cudaUnbindTexture(siteLink0TexSingle); \ cudaUnbindTexture(siteLink1TexSingle); \ } \ cudaUnbindTexture(fatGauge0TexSingle); \ cudaUnbindTexture(fatGauge1TexSingle); \ } \ }while(0) #define BIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ cudaBindTexture(0, muLink0TexDouble, mulink_even, staple_bytes); \ cudaBindTexture(0, muLink1TexDouble, mulink_odd, staple_bytes); \ }else{ \ cudaBindTexture(0, muLink0TexSingle, mulink_even, staple_bytes); \ cudaBindTexture(0, muLink1TexSingle, mulink_odd, staple_bytes); \ } \ }while(0) #define UNBIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ cudaUnbindTexture(muLink0TexSingle); \ cudaUnbindTexture(muLink1TexSingle); \ }else{ \ cudaUnbindTexture(muLink0TexDouble); \ cudaUnbindTexture(muLink1TexDouble); \ } \ }while(0) #define BIND_SITE_AND_FAT_LINK do { \ if(prec == QUDA_DOUBLE_PRECISION){ \ cudaBindTexture(0, siteLink0TexDouble, cudaSiteLink.even, cudaSiteLink.bytes); \ cudaBindTexture(0, siteLink1TexDouble, cudaSiteLink.odd, cudaSiteLink.bytes); \ cudaBindTexture(0, fatGauge0TexDouble, cudaFatLink.even, cudaFatLink.bytes); \ cudaBindTexture(0, fatGauge1TexDouble, cudaFatLink.odd, cudaFatLink.bytes); \ }else{ \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ cudaBindTexture(0, siteLink0TexSingle_norecon, cudaSiteLink.even, cudaSiteLink.bytes); \ cudaBindTexture(0, siteLink1TexSingle_norecon, cudaSiteLink.odd, cudaSiteLink.bytes); \ }else{ \ cudaBindTexture(0, siteLink0TexSingle, cudaSiteLink.even, cudaSiteLink.bytes); \ cudaBindTexture(0, 
siteLink1TexSingle, cudaSiteLink.odd, cudaSiteLink.bytes); \ } \ cudaBindTexture(0, fatGauge0TexSingle, cudaFatLink.even, cudaFatLink.bytes); \ cudaBindTexture(0, fatGauge1TexSingle, cudaFatLink.odd, cudaFatLink.bytes); \ } \ }while(0) #define BIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ cudaBindTexture(0, muLink0TexDouble, mulink_even, staple_bytes); \ cudaBindTexture(0, muLink1TexDouble, mulink_odd, staple_bytes); \ }else{ \ cudaBindTexture(0, muLink0TexSingle, mulink_even, staple_bytes); \ cudaBindTexture(0, muLink1TexSingle, mulink_odd, staple_bytes); \ } \ }while(0) #define UNBIND_MU_LINK() do{ \ if(prec == QUDA_DOUBLE_PRECISION){ \ cudaUnbindTexture(muLink0TexSingle); \ cudaUnbindTexture(muLink1TexSingle); \ }else{ \ cudaUnbindTexture(muLink0TexDouble); \ cudaUnbindTexture(muLink1TexDouble); \ } \ }while(0) #define BIND_SITE_AND_FAT_LINK_REVERSE do { \ if(prec == QUDA_DOUBLE_PRECISION){ \ cudaBindTexture(0, siteLink1TexDouble, cudaSiteLink.even, cudaSiteLink.bytes); \ cudaBindTexture(0, siteLink0TexDouble, cudaSiteLink.odd, cudaSiteLink.bytes); \ cudaBindTexture(0, fatGauge1TexDouble, cudaFatLink.even, cudaFatLink.bytes); \ cudaBindTexture(0, fatGauge0TexDouble, cudaFatLink.odd, cudaFatLink.bytes); \ }else{ \ if(cudaSiteLink.reconstruct == QUDA_RECONSTRUCT_NO){ \ cudaBindTexture(0, siteLink1TexSingle_norecon, cudaSiteLink.even, cudaSiteLink.bytes); \ cudaBindTexture(0, siteLink0TexSingle_norecon, cudaSiteLink.odd, cudaSiteLink.bytes); \ }else{ \ cudaBindTexture(0, siteLink1TexSingle, cudaSiteLink.even, cudaSiteLink.bytes); \ cudaBindTexture(0, siteLink0TexSingle, cudaSiteLink.odd, cudaSiteLink.bytes); \ } \ cudaBindTexture(0, fatGauge1TexSingle, cudaFatLink.even, cudaFatLink.bytes); \ cudaBindTexture(0, fatGauge0TexSingle, cudaFatLink.odd, cudaFatLink.bytes); \ } \ }while(0) #define ENUMERATE_FUNCS(mu,nu) switch(mu) { \ case 0: \ switch(nu){ \ case 0: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 1: \ CALL_FUNCTION(0,1); \ break; \ case 2: \ CALL_FUNCTION(0,2); \ break; \ case 3: \ CALL_FUNCTION(0,3); \ break; \ } \ break; \ case 1: \ switch(nu){ \ case 0: \ CALL_FUNCTION(1,0); \ break; \ case 1: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 2: \ CALL_FUNCTION(1,2); \ break; \ case 3: \ CALL_FUNCTION(1,3); \ break; \ } \ break; \ case 2: \ switch(nu){ \ case 0: \ CALL_FUNCTION(2,0); \ break; \ case 1: \ CALL_FUNCTION(2,1); \ break; \ case 2: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 3: \ CALL_FUNCTION(2,3); \ break; \ } \ break; \ case 3: \ switch(nu){ \ case 0: \ CALL_FUNCTION(3,0); \ break; \ case 1: \ CALL_FUNCTION(3,1); \ break; \ case 2: \ CALL_FUNCTION(3,2); \ break; \ case 3: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ } \ break; \ } #define ENUMERATE_FUNCS_SAVE(mu,nu, save_staple) if(save_staple){ \ switch(mu) { \ case 0: \ switch(nu){ \ case 0: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 1: \ CALL_FUNCTION(0,1,1); \ break; \ case 2: \ CALL_FUNCTION(0,2,1); \ break; \ case 3: \ CALL_FUNCTION(0,3,1); \ break; \ } \ break; \ case 1: \ switch(nu){ \ case 0: \ CALL_FUNCTION(1,0,1); \ break; \ case 1: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 2: \ CALL_FUNCTION(1,2,1); \ break; \ case 3: \ CALL_FUNCTION(1,3,1); \ break; \ } \ break; \ case 2: \ switch(nu){ \ case 0: \ CALL_FUNCTION(2,0,1); \ break; \ case 1: \ CALL_FUNCTION(2,1,1); \ break; \ case 2: \ printf("ERROR: invalid 
direction combination\n"); exit(1); \ break; \ case 3: \ CALL_FUNCTION(2,3,1); \ break; \ } \ break; \ case 3: \ switch(nu){ \ case 0: \ CALL_FUNCTION(3,0,1); \ break; \ case 1: \ CALL_FUNCTION(3,1,1); \ break; \ case 2: \ CALL_FUNCTION(3,2,1); \ break; \ case 3: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ } \ break; \ } \ }else{ \ switch(mu) { \ case 0: \ switch(nu){ \ case 0: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 1: \ CALL_FUNCTION(0,1,0); \ break; \ case 2: \ CALL_FUNCTION(0,2,0); \ break; \ case 3: \ CALL_FUNCTION(0,3,0); \ break; \ } \ break; \ case 1: \ switch(nu){ \ case 0: \ CALL_FUNCTION(1,0,0); \ break; \ case 1: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 2: \ CALL_FUNCTION(1,2,0); \ break; \ case 3: \ CALL_FUNCTION(1,3,0); \ break; \ } \ break; \ case 2: \ switch(nu){ \ case 0: \ CALL_FUNCTION(2,0,0); \ break; \ case 1: \ CALL_FUNCTION(2,1,0); \ break; \ case 2: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ case 3: \ CALL_FUNCTION(2,3,0); \ break; \ } \ break; \ case 3: \ switch(nu){ \ case 0: \ CALL_FUNCTION(3,0,0); \ break; \ case 1: \ CALL_FUNCTION(3,1,0); \ break; \ case 2: \ CALL_FUNCTION(3,2,0); \ break; \ case 3: \ printf("ERROR: invalid direction combination\n"); exit(1); \ break; \ } \ break; \ } \ } void siteComputeGenStapleParityKernel(void* staple_even, void* staple_odd, void* sitelink_even, void* sitelink_odd, void* fatlink_even, void* fatlink_odd, int mu, int nu, double mycoeff, QudaReconstructType recon, QudaPrecision prec, dim3 halfGridDim, llfat_kernel_param_t kparam, cudaStream_t* stream) { //compute even and odd #define CALL_FUNCTION(mu, nu) \ if (prec == QUDA_DOUBLE_PRECISION){ \ if(recon == QUDA_RECONSTRUCT_NO){ \ do_siteComputeGenStapleParity18Kernel<mu,nu, 0> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double)mycoeff, kparam); \ do_siteComputeGenStapleParity18Kernel<mu,nu, 1> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double)mycoeff, kparam); \ }else{ \ do_siteComputeGenStapleParity12Kernel<mu,nu, 0> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double)mycoeff, kparam); \ do_siteComputeGenStapleParity12Kernel<mu,nu, 1> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double)mycoeff, kparam); \ } \ }else { \ if(recon == QUDA_RECONSTRUCT_NO){ \ do_siteComputeGenStapleParity18Kernel<mu,nu, 0> \ <<<halfGridDim, blockDim, 0, *stream>>>((float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float)mycoeff, kparam); \ do_siteComputeGenStapleParity18Kernel<mu,nu, 1> \ <<<halfGridDim, blockDim, 0, *stream>>>((float2*)staple_odd, (float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, (float2*)fatlink_even, \ (float)mycoeff, kparam); \ }else{ \ do_siteComputeGenStapleParity12Kernel<mu,nu, 0> \ <<<halfGridDim, blockDim, 0, 
*stream>>>((float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float)mycoeff, kparam); \ do_siteComputeGenStapleParity12Kernel<mu,nu, 1> \ <<<halfGridDim, blockDim, 0, *stream>>>((float2*)staple_odd, (float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, (float2*)fatlink_even, \ (float)mycoeff, kparam); \ } \ } dim3 blockDim(BLOCK_DIM , 1, 1); ENUMERATE_FUNCS(mu,nu); #undef CALL_FUNCTION } void computeGenStapleFieldParityKernel(void* staple_even, void* staple_odd, void* sitelink_even, void* sitelink_odd, void* fatlink_even, void* fatlink_odd, void* mulink_even, void* mulink_odd, int mu, int nu, int save_staple, double mycoeff, QudaReconstructType recon, QudaPrecision prec, dim3 halfGridDim, llfat_kernel_param_t kparam, cudaStream_t* stream) { #define CALL_FUNCTION(mu, nu, save_staple) \ if (prec == QUDA_DOUBLE_PRECISION){ \ if(recon == QUDA_RECONSTRUCT_NO){ \ do_computeGenStapleFieldParity18Kernel<mu,nu, 0, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double2*)mulink_even, (double2*)mulink_odd, \ (double)mycoeff, kparam); \ do_computeGenStapleFieldParity18Kernel<mu,nu, 1, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double2*)mulink_odd, (double2*)mulink_even, \ (double)mycoeff, kparam); \ }else{ \ do_computeGenStapleFieldParity12Kernel<mu,nu, 0, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_even, (double2*)staple_odd, \ (double2*)sitelink_even, (double2*)sitelink_odd, \ (double2*)fatlink_even, (double2*)fatlink_odd, \ (double2*)mulink_even, (double2*)mulink_odd, \ (double)mycoeff, kparam); \ do_computeGenStapleFieldParity12Kernel<mu,nu, 1, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((double2*)staple_odd, (double2*)staple_even, \ (double2*)sitelink_odd, (double2*)sitelink_even, \ (double2*)fatlink_odd, (double2*)fatlink_even, \ (double2*)mulink_odd, (double2*)mulink_even, \ (double)mycoeff, kparam); \ } \ }else{ \ if(recon == QUDA_RECONSTRUCT_NO){ \ do_computeGenStapleFieldParity18Kernel<mu,nu, 0, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float2*)mulink_even, (float2*)mulink_odd, \ (float)mycoeff, kparam); \ do_computeGenStapleFieldParity18Kernel<mu,nu, 1, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((float2*)staple_odd, (float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, (float2*)fatlink_even, \ (float2*)mulink_odd, (float2*)mulink_even, \ (float)mycoeff, kparam); \ }else{ \ do_computeGenStapleFieldParity12Kernel<mu,nu, 0, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((float2*)staple_even, (float2*)staple_odd, \ (float2*)sitelink_even, (float2*)sitelink_odd, \ (float2*)fatlink_even, (float2*)fatlink_odd, \ (float2*)mulink_even, (float2*)mulink_odd, \ (float)mycoeff, kparam); \ do_computeGenStapleFieldParity12Kernel<mu,nu, 1, save_staple> \ <<<halfGridDim, blockDim, 0, *stream>>>((float2*)staple_odd, (float2*)staple_even, \ (float2*)sitelink_odd, (float2*)sitelink_even, \ (float2*)fatlink_odd, 
(float2*)fatlink_even, \ (float2*)mulink_odd, (float2*)mulink_even, \ (float)mycoeff, kparam); \ } \ } BIND_MU_LINK(); dim3 blockDim(BLOCK_DIM , 1, 1); ENUMERATE_FUNCS_SAVE(mu,nu,save_staple); UNBIND_MU_LINK(); #undef CALL_FUNCTION } void llfatOneLinkKernel(FullGauge cudaFatLink, FullGauge cudaSiteLink, FullStaple cudaStaple, FullStaple cudaStaple1, QudaGaugeParam* param, double* act_path_coeff) { QudaPrecision prec = cudaSiteLink.precision; QudaReconstructType recon = cudaSiteLink.reconstruct; BIND_SITE_AND_FAT_LINK; int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; dim3 gridDim(volume/BLOCK_DIM,1,1); dim3 blockDim(BLOCK_DIM , 1, 1); staple_bytes = cudaStaple.bytes; if(prec == QUDA_DOUBLE_PRECISION){ if(recon == QUDA_RECONSTRUCT_NO){ llfatOneLink18Kernel<<<gridDim, blockDim>>>((double2*)cudaSiteLink.even, (double2*)cudaSiteLink.odd, (double2*)cudaFatLink.even, (double2*)cudaFatLink.odd, (double)act_path_coeff[0], (double)act_path_coeff[5]); }else{ llfatOneLink12Kernel<<<gridDim, blockDim>>>((double2*)cudaSiteLink.even, (double2*)cudaSiteLink.odd, (double2*)cudaFatLink.even, (double2*)cudaFatLink.odd, (double)act_path_coeff[0], (double)act_path_coeff[5]); } }else{ //single precision if(recon == QUDA_RECONSTRUCT_NO){ llfatOneLink18Kernel<<<gridDim, blockDim>>>((float2*)cudaSiteLink.even, (float2*)cudaSiteLink.odd, (float2*)cudaFatLink.even, (float2*)cudaFatLink.odd, (float)act_path_coeff[0], (float)act_path_coeff[5]); }else{ llfatOneLink12Kernel<<<gridDim, blockDim>>>((float2*)cudaSiteLink.even, (float2*)cudaSiteLink.odd, (float2*)cudaFatLink.even, (float2*)cudaFatLink.odd, (float)act_path_coeff[0], (float)act_path_coeff[5]); } } }
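// Note on the UNBIND_MU_LINK macro defined above (in both of its copies): the
// precision branches appear to be swapped relative to BIND_MU_LINK -- the
// QUDA_DOUBLE_PRECISION branch unbinds muLink0TexSingle/muLink1TexSingle while
// the single-precision branch unbinds the double-precision textures. Unbinding
// an unused texture reference is typically benign, which is likely why this
// goes unnoticed at runtime, but the intent implied by BIND_MU_LINK would be
// the following (a corrected sketch; UNBIND_MU_LINK_FIXED is a hypothetical
// name chosen so as not to redefine the original macro):
#define UNBIND_MU_LINK_FIXED() do{                \
    if(prec == QUDA_DOUBLE_PRECISION){            \
      cudaUnbindTexture(muLink0TexDouble);        \
      cudaUnbindTexture(muLink1TexDouble);        \
    }else{                                        \
      cudaUnbindTexture(muLink0TexSingle);        \
      cudaUnbindTexture(muLink1TexSingle);        \
    }                                             \
  }while(0)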
f7ad1166f527fe65280f61a222d75e48544a623c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lite/core/op_registry.h" #include "lite/kernels/cuda/relu_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T> __global__ void ReluKernel(const int num, const T* input, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { #if __CUDA_ARCH__ >= 350 output[index] = __ldg(input + index) >= 0 ? __ldg(input + index) : 0; #else output[index] = input[index] >= 0 ? input[index] : 0; #endif } } void ReluCompute::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); int num = static_cast<int>(param.X->numel()); auto input = param.X->data<float>(); auto output = param.Out->mutable_data<float>(TARGET(kCUDA)); int threads = 1024; int blocks = (num + threads - 1) / threads; hipLaunchKernelGGL(( ReluKernel), dim3(blocks), dim3(threads), 0, stream, num, input, output); hipError_t error = hipGetLastError(); if (error != hipSuccess) LOG(INFO) << hipGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL( relu, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::ReluCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .Finalize();
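// Two details of the hipified file above are worth noting. First,
// hipLaunchKernelGGL(( ReluKernel), dim3(blocks), dim3(threads), 0, stream, ...)
// is hipify's mechanical translation of the CUDA triple-chevron launch; the
// two forms are equivalent:
//
//   ReluKernel<<<blocks, threads, 0, stream>>>(num, input, output);   // CUDA
//   hipLaunchKernelGGL(ReluKernel, dim3(blocks), dim3(threads), 0, stream,
//                      num, input, output);                           // HIP
//
// Second, the #if __CUDA_ARCH__ >= 350 guard around __ldg() was carried over
// unchanged; when the file is compiled for an AMD target, __CUDA_ARCH__ is not
// defined, so the preprocessor falls through to the plain-load #else branch,
// which is the safe (if conservative) behavior.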
f7ad1166f527fe65280f61a222d75e48544a623c.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lite/core/op_registry.h" #include "lite/kernels/cuda/relu_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T> __global__ void ReluKernel(const int num, const T* input, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { #if __CUDA_ARCH__ >= 350 output[index] = __ldg(input + index) >= 0 ? __ldg(input + index) : 0; #else output[index] = input[index] >= 0 ? input[index] : 0; #endif } } void ReluCompute::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); int num = static_cast<int>(param.X->numel()); auto input = param.X->data<float>(); auto output = param.Out->mutable_data<float>(TARGET(kCUDA)); int threads = 1024; int blocks = (num + threads - 1) / threads; ReluKernel<<<blocks, threads, 0, stream>>>(num, input, output); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL( relu, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::ReluCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .Finalize();
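// A minimal, self-contained sketch (independent of the PaddleLite scaffolding
// above) of the same launch pattern ReluCompute::Run() uses: a 1024-thread
// block, a ceiling-division grid size, and a cudaGetLastError() check after
// the launch. relu_demo is a hypothetical stand-in for ReluKernel<float>.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void relu_demo(int n, const float *in, float *out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i] >= 0.f ? in[i] : 0.f;
}

int main() {
  const int n = 1 << 20;
  float *in = nullptr, *out = nullptr;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; ++i) in[i] = (i % 2) ? 1.f : -1.f;

  int threads = 1024;                        // block size used above
  int blocks = (n + threads - 1) / threads;  // ceiling division
  relu_demo<<<blocks, threads>>>(n, in, out);

  // Same post-launch check as ReluCompute::Run().
  cudaError_t error = cudaGetLastError();
  if (error != cudaSuccess) printf("%s\n", cudaGetErrorString(error));

  cudaDeviceSynchronize();
  printf("out[0]=%f out[1]=%f\n", out[0], out[1]);
  cudaFree(in);
  cudaFree(out);
  return 0;
}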
7c3b448a699470bcf3723a086345f8ee9147c44d.hip
// !!! This is a file automatically generated by hipify!!! //******************************************************************************************** //* This is a GPU implementation of the Overlap-and-save method for calculating convolution. //* Copyright (C) 2019 Adámek Karel //* //* Authors: Karel Adamek ( ORCID:0000-0003-2797-0595; https://github.com/KAdamek ), Wesley Armour ( ORCID:0000-0003-1756-3064 ), Sofia Dimoudi //******************************************************************************************** #include <iostream> #include <fstream> #include <iomanip> #include <hipfft.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "debug.h" #include "timer.h" #include "utils_cuda.h" #include "params.h" #define WARP 32 //#define TESTING int device=DEVICEID; class FFT_Params { public: static const int fft_exp = -1; static const int fft_length = -1; static const int warp = 32; }; class FFT_256 : public FFT_Params { public: static const int fft_exp = 8; static const int fft_length = 256; static const int fft_length_quarter = 64; static const int fft_length_half = 128; static const int fft_length_three_quarters = 192; }; class FFT_512 : public FFT_Params { public: static const int fft_exp = 9; static const int fft_length = 512; static const int fft_length_quarter = 128; static const int fft_length_half = 256; static const int fft_length_three_quarters = 384; }; class FFT_1024 : public FFT_Params { public: static const int fft_exp = 10; static const int fft_length = 1024; static const int fft_length_quarter = 256; static const int fft_length_half = 512; static const int fft_length_three_quarters = 768; }; class FFT_2048 : public FFT_Params { public: static const int fft_exp = 11; static const int fft_length = 2048; static const int fft_length_quarter = 512; static const int fft_length_half = 1024; static const int fft_length_three_quarters = 1536; }; class FFT_4096 : public FFT_Params { public: static const int fft_exp = 12; static const int fft_length = 4096; static const int fft_length_quarter = 1024; static const int fft_length_half = 2048; static const int fft_length_three_quarters = 3072; }; class FFT_ConstDirection { public: static const int fft_direction = -1; }; class FFT_forward : public FFT_ConstDirection { public: static const int fft_direction = 0; }; class FFT_inverse : public FFT_ConstDirection { public: static const int fft_direction = 1; }; __device__ __inline__ float2 Get_W_value(int N, int m){ float2 ctemp; sincosf ( -6.283185308f*fdividef( (float) m, (float) N), &ctemp.y, &ctemp.x); return(ctemp); } __device__ __inline__ float2 Get_W_value_inverse(int N, int m){ float2 ctemp; sincosf ( 6.283185308f*fdividef( (float) m, (float) N), &ctemp.y, &ctemp.x); return(ctemp); } __device__ __inline__ float shfl_xor(float *value, int par){ #if (CUDART_VERSION >= 9000) return(__shfl_xor_sync(0xffffffff, (*value), par)); #else return(__shfl_xor((*value), par)); #endif } template<class const_params> __inline__ __device__ void CT_DIT_FFT_4way(float2 *s_input){ float2 A_DFT_value, B_DFT_value, C_DFT_value, D_DFT_value; float2 W; float2 Aftemp, Bftemp, Cftemp, Dftemp; int local_id, warp_id; int j, m_param; int parity, itemp; int A_read_index, B_read_index, C_read_index, D_read_index; int PoT, PoTp1, q; local_id = threadIdx.x & (const_params::warp - 1); warp_id = threadIdx.x/const_params::warp; #ifdef TESTING int A_load_id, B_load_id, i, A_n, B_n; A_load_id = threadIdx.x; B_load_id = threadIdx.x + const_params::fft_length_quarter;
A_n=threadIdx.x; B_n=threadIdx.x + const_params::fft_length_quarter; for(i=1; i<const_params::fft_exp; i++) { A_n >>= 1; B_n >>= 1; A_load_id <<= 1; A_load_id |= A_n & 1; B_load_id <<= 1; B_load_id |= B_n & 1; } A_load_id &= const_params::fft_length-1; B_load_id &= const_params::fft_length-1; //-----> Scrambling input A_DFT_value=s_input[A_load_id]; B_DFT_value=s_input[A_load_id + 1]; C_DFT_value=s_input[B_load_id]; D_DFT_value=s_input[B_load_id + 1]; __syncthreads(); s_input[threadIdx.x] = A_DFT_value; s_input[threadIdx.x + const_params::fft_length_half] = B_DFT_value; s_input[threadIdx.x + const_params::fft_length_quarter] = C_DFT_value; s_input[threadIdx.x + const_params::fft_length_three_quarters] = D_DFT_value; __syncthreads(); #endif //-----> FFT //--> PoT=1; PoTp1=2; //--> First iteration itemp=local_id&1; parity=(1-itemp*2); A_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp]; B_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp + const_params::warp]; C_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp + 2*const_params::warp]; D_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp + 3*const_params::warp]; __syncthreads(); A_DFT_value.x=parity*A_DFT_value.x + shfl_xor(&A_DFT_value.x, 1); A_DFT_value.y=parity*A_DFT_value.y + shfl_xor(&A_DFT_value.y, 1); B_DFT_value.x=parity*B_DFT_value.x + shfl_xor(&B_DFT_value.x, 1); B_DFT_value.y=parity*B_DFT_value.y + shfl_xor(&B_DFT_value.y, 1); C_DFT_value.x=parity*C_DFT_value.x + shfl_xor(&C_DFT_value.x, 1); C_DFT_value.y=parity*C_DFT_value.y + shfl_xor(&C_DFT_value.y, 1); D_DFT_value.x=parity*D_DFT_value.x + shfl_xor(&D_DFT_value.x, 1); D_DFT_value.y=parity*D_DFT_value.y + shfl_xor(&D_DFT_value.y, 1); //--> Second through Fifth iteration (no synchronization) PoT=2; PoTp1=4; for(q=1;q<5;q++){ m_param = (local_id & (PoTp1 - 1)); itemp = m_param>>q; parity=((itemp<<1)-1); W = Get_W_value_inverse(PoTp1, itemp*m_param); Aftemp.x = W.x*A_DFT_value.x - W.y*A_DFT_value.y; Aftemp.y = W.x*A_DFT_value.y + W.y*A_DFT_value.x; Bftemp.x = W.x*B_DFT_value.x - W.y*B_DFT_value.y; Bftemp.y = W.x*B_DFT_value.y + W.y*B_DFT_value.x; Cftemp.x = W.x*C_DFT_value.x - W.y*C_DFT_value.y; Cftemp.y = W.x*C_DFT_value.y + W.y*C_DFT_value.x; Dftemp.x = W.x*D_DFT_value.x - W.y*D_DFT_value.y; Dftemp.y = W.x*D_DFT_value.y + W.y*D_DFT_value.x; A_DFT_value.x = Aftemp.x + parity*shfl_xor(&Aftemp.x,PoT); A_DFT_value.y = Aftemp.y + parity*shfl_xor(&Aftemp.y,PoT); B_DFT_value.x = Bftemp.x + parity*shfl_xor(&Bftemp.x,PoT); B_DFT_value.y = Bftemp.y + parity*shfl_xor(&Bftemp.y,PoT); C_DFT_value.x = Cftemp.x + parity*shfl_xor(&Cftemp.x,PoT); C_DFT_value.y = Cftemp.y + parity*shfl_xor(&Cftemp.y,PoT); D_DFT_value.x = Dftemp.x + parity*shfl_xor(&Dftemp.x,PoT); D_DFT_value.y = Dftemp.y + parity*shfl_xor(&Dftemp.y,PoT); PoT=PoT<<1; PoTp1=PoTp1<<1; } itemp = local_id + (warp_id<<2)*const_params::warp; s_input[itemp] = A_DFT_value; s_input[itemp + const_params::warp] = B_DFT_value; s_input[itemp + 2*const_params::warp] = C_DFT_value; s_input[itemp + 3*const_params::warp] = D_DFT_value; for(q=5;q<(const_params::fft_exp-1);q++){ __syncthreads(); m_param = threadIdx.x & (PoT - 1); j=threadIdx.x>>q; W=Get_W_value_inverse(PoTp1,m_param); A_read_index=j*(PoTp1<<1) + m_param; B_read_index=j*(PoTp1<<1) + m_param + PoT; C_read_index=j*(PoTp1<<1) + m_param + PoTp1; D_read_index=j*(PoTp1<<1) + m_param + 3*PoT; Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; A_DFT_value.x=Aftemp.x + W.x*Bftemp.x - W.y*Bftemp.y; A_DFT_value.y=Aftemp.y + W.x*Bftemp.y + 
W.y*Bftemp.x; B_DFT_value.x=Aftemp.x - W.x*Bftemp.x + W.y*Bftemp.y; B_DFT_value.y=Aftemp.y - W.x*Bftemp.y - W.y*Bftemp.x; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; C_DFT_value.x=Cftemp.x + W.x*Dftemp.x - W.y*Dftemp.y; C_DFT_value.y=Cftemp.y + W.x*Dftemp.y + W.y*Dftemp.x; D_DFT_value.x=Cftemp.x - W.x*Dftemp.x + W.y*Dftemp.y; D_DFT_value.y=Cftemp.y - W.x*Dftemp.y - W.y*Dftemp.x; s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; PoT=PoT<<1; PoTp1=PoTp1<<1; } //last iteration __syncthreads(); m_param = threadIdx.x; W=Get_W_value_inverse(PoTp1,m_param); A_read_index = m_param; B_read_index = m_param + PoT; C_read_index = m_param + (PoT>>1); D_read_index = m_param + 3*(PoT>>1); Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; A_DFT_value.x=Aftemp.x + W.x*Bftemp.x - W.y*Bftemp.y; A_DFT_value.y=Aftemp.y + W.x*Bftemp.y + W.y*Bftemp.x; B_DFT_value.x=Aftemp.x - W.x*Bftemp.x + W.y*Bftemp.y; B_DFT_value.y=Aftemp.y - W.x*Bftemp.y - W.y*Bftemp.x; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; C_DFT_value.x=Cftemp.x - W.y*Dftemp.x - W.x*Dftemp.y; C_DFT_value.y=Cftemp.y - W.y*Dftemp.y + W.x*Dftemp.x; D_DFT_value.x=Cftemp.x + W.y*Dftemp.x + W.x*Dftemp.y; D_DFT_value.y=Cftemp.y + W.y*Dftemp.y - W.x*Dftemp.x; s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; __syncthreads(); } template<class const_params> __inline__ __device__ void CT_DIF_FFT_4way(float2 *s_input){ float2 A_DFT_value, B_DFT_value, C_DFT_value, D_DFT_value; float2 W; float2 Aftemp, Bftemp, Cftemp, Dftemp; int local_id, warp_id; int j, m_param, parity; int A_read_index, B_read_index, C_read_index, D_read_index; int PoT, PoTm1, q; local_id = threadIdx.x & (WARP - 1); warp_id = threadIdx.x/WARP; //-----> FFT //--> PoTm1 = const_params::fft_length_half; PoT = const_params::fft_length; //Highest iteration m_param = threadIdx.x; j=0; A_read_index = m_param; B_read_index = m_param + PoTm1; C_read_index = m_param + (PoTm1>>1); D_read_index = m_param + 3*(PoTm1>>1); W=Get_W_value(PoT, m_param); Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; A_DFT_value.x = Aftemp.x + Bftemp.x; A_DFT_value.y = Aftemp.y + Bftemp.y; B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y); B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x); C_DFT_value.x = Cftemp.x + Dftemp.x; C_DFT_value.y = Cftemp.y + Dftemp.y; D_DFT_value.x = W.y*(Cftemp.x - Dftemp.x) + W.x*(Cftemp.y - Dftemp.y); D_DFT_value.y = W.y*(Cftemp.y - Dftemp.y) - W.x*(Cftemp.x - Dftemp.x); s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; PoT=PoT>>1; PoTm1=PoTm1>>1; for(q=(const_params::fft_exp-2);q>4;q--){ __syncthreads(); m_param = threadIdx.x & (PoTm1 - 1); j=threadIdx.x>>q; W=Get_W_value(PoT, m_param); A_read_index=j*(PoT<<1) + m_param; B_read_index=j*(PoT<<1) + m_param + PoTm1; C_read_index=j*(PoT<<1) + m_param + PoT; D_read_index=j*(PoT<<1) + m_param + 3*PoTm1; Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; A_DFT_value.x = Aftemp.x + Bftemp.x; A_DFT_value.y = Aftemp.y + Bftemp.y; C_DFT_value.x = Cftemp.x + Dftemp.x; C_DFT_value.y = Cftemp.y + Dftemp.y; B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - 
W.y*(Aftemp.y - Bftemp.y); B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x); D_DFT_value.x = W.x*(Cftemp.x - Dftemp.x) - W.y*(Cftemp.y - Dftemp.y); D_DFT_value.y = W.x*(Cftemp.y - Dftemp.y) + W.y*(Cftemp.x - Dftemp.x); s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; PoT=PoT>>1; PoTm1=PoTm1>>1; } __syncthreads(); j = local_id + (warp_id<<2)*WARP; A_DFT_value = s_input[j]; B_DFT_value = s_input[j + WARP]; C_DFT_value = s_input[j + 2*WARP]; D_DFT_value = s_input[j + 3*WARP]; for(q=4;q>=0;q--){ m_param = (local_id & (PoT - 1)); j = m_param>>q; parity=(1-j*2); W = Get_W_value(PoT, j*(m_param-PoTm1)); Aftemp.x = parity*A_DFT_value.x + shfl_xor(&A_DFT_value.x, PoTm1); Aftemp.y = parity*A_DFT_value.y + shfl_xor(&A_DFT_value.y, PoTm1); Bftemp.x = parity*B_DFT_value.x + shfl_xor(&B_DFT_value.x, PoTm1); Bftemp.y = parity*B_DFT_value.y + shfl_xor(&B_DFT_value.y, PoTm1); Cftemp.x = parity*C_DFT_value.x + shfl_xor(&C_DFT_value.x, PoTm1); Cftemp.y = parity*C_DFT_value.y + shfl_xor(&C_DFT_value.y, PoTm1); Dftemp.x = parity*D_DFT_value.x + shfl_xor(&D_DFT_value.x, PoTm1); Dftemp.y = parity*D_DFT_value.y + shfl_xor(&D_DFT_value.y, PoTm1); A_DFT_value.x = W.x*Aftemp.x - W.y*Aftemp.y; A_DFT_value.y = W.x*Aftemp.y + W.y*Aftemp.x; B_DFT_value.x = W.x*Bftemp.x - W.y*Bftemp.y; B_DFT_value.y = W.x*Bftemp.y + W.y*Bftemp.x; C_DFT_value.x = W.x*Cftemp.x - W.y*Cftemp.y; C_DFT_value.y = W.x*Cftemp.y + W.y*Cftemp.x; D_DFT_value.x = W.x*Dftemp.x - W.y*Dftemp.y; D_DFT_value.y = W.x*Dftemp.y + W.y*Dftemp.x; PoT=PoT>>1; PoTm1=PoTm1>>1; } j = local_id + (warp_id<<2)*WARP; s_input[j] = A_DFT_value; s_input[j + WARP] = B_DFT_value; s_input[j + 2*WARP] = C_DFT_value; s_input[j + 3*WARP] = D_DFT_value; __syncthreads(); #ifdef TESTING __syncthreads(); int A_load_id, B_load_id, i, A_n, B_n; A_load_id = threadIdx.x; B_load_id = threadIdx.x + const_params::fft_length_quarter; A_n=threadIdx.x; B_n=threadIdx.x + const_params::fft_length_quarter; for(i=1; i<const_params::fft_exp; i++) { A_n >>= 1; B_n >>= 1; A_load_id <<= 1; A_load_id |= A_n & 1; B_load_id <<= 1; B_load_id |= B_n & 1; } A_load_id &= const_params::fft_length-1; B_load_id &= const_params::fft_length-1; //-----> Scrambling input A_DFT_value=s_input[A_load_id]; B_DFT_value=s_input[A_load_id + 1]; C_DFT_value=s_input[B_load_id]; D_DFT_value=s_input[B_load_id + 1]; __syncthreads(); s_input[threadIdx.x] = A_DFT_value; s_input[threadIdx.x + const_params::fft_length_half] = B_DFT_value; s_input[threadIdx.x + const_params::fft_length_quarter] = C_DFT_value; s_input[threadIdx.x + const_params::fft_length_three_quarters] = D_DFT_value; __syncthreads(); #endif } template<class const_params> __inline__ __device__ void FFT_CT_DIT_4elem_2vertical_no_reorder(float2 *s_input1, float2 *s_input2){ float2 A_DFT_value1, B_DFT_value1, C_DFT_value1, D_DFT_value1; float2 A_DFT_value2, B_DFT_value2, C_DFT_value2, D_DFT_value2; float2 W; float2 Aftemp1, Bftemp1, Cftemp1, Dftemp1; float2 Aftemp2, Bftemp2, Cftemp2, Dftemp2; int local_id, warp_id; int j, m_param; int parity, itemp; int A_read_index, B_read_index, C_read_index, D_read_index; int PoT, PoTp1, q; local_id = threadIdx.x & (WARP - 1); warp_id = threadIdx.x/WARP; //-----> FFT //--> PoT=1; PoTp1=2; //--> First iteration itemp=local_id&1; parity=(1-itemp*2); A_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP]; B_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP + WARP]; C_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP + 2*WARP]; 
D_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP + 3*WARP]; A_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP]; B_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP + WARP]; C_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP + 2*WARP]; D_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP + 3*WARP]; __syncthreads(); A_DFT_value1.x=parity*A_DFT_value1.x + shfl_xor(&A_DFT_value1.x,1); A_DFT_value1.y=parity*A_DFT_value1.y + shfl_xor(&A_DFT_value1.y,1); B_DFT_value1.x=parity*B_DFT_value1.x + shfl_xor(&B_DFT_value1.x,1); B_DFT_value1.y=parity*B_DFT_value1.y + shfl_xor(&B_DFT_value1.y,1); C_DFT_value1.x=parity*C_DFT_value1.x + shfl_xor(&C_DFT_value1.x,1); C_DFT_value1.y=parity*C_DFT_value1.y + shfl_xor(&C_DFT_value1.y,1); D_DFT_value1.x=parity*D_DFT_value1.x + shfl_xor(&D_DFT_value1.x,1); D_DFT_value1.y=parity*D_DFT_value1.y + shfl_xor(&D_DFT_value1.y,1); A_DFT_value2.x=parity*A_DFT_value2.x + shfl_xor(&A_DFT_value2.x,1); A_DFT_value2.y=parity*A_DFT_value2.y + shfl_xor(&A_DFT_value2.y,1); B_DFT_value2.x=parity*B_DFT_value2.x + shfl_xor(&B_DFT_value2.x,1); B_DFT_value2.y=parity*B_DFT_value2.y + shfl_xor(&B_DFT_value2.y,1); C_DFT_value2.x=parity*C_DFT_value2.x + shfl_xor(&C_DFT_value2.x,1); C_DFT_value2.y=parity*C_DFT_value2.y + shfl_xor(&C_DFT_value2.y,1); D_DFT_value2.x=parity*D_DFT_value2.x + shfl_xor(&D_DFT_value2.x,1); D_DFT_value2.y=parity*D_DFT_value2.y + shfl_xor(&D_DFT_value2.y,1); //--> Second through Fifth iteration (no synchronization) PoT=2; PoTp1=4; for(q=1;q<5;q++){ m_param = (local_id & (PoTp1 - 1)); itemp = m_param>>q; parity=((itemp<<1)-1); W = Get_W_value_inverse(PoTp1, itemp*m_param); Aftemp1.x = W.x*A_DFT_value1.x - W.y*A_DFT_value1.y; Aftemp1.y = W.x*A_DFT_value1.y + W.y*A_DFT_value1.x; Bftemp1.x = W.x*B_DFT_value1.x - W.y*B_DFT_value1.y; Bftemp1.y = W.x*B_DFT_value1.y + W.y*B_DFT_value1.x; Cftemp1.x = W.x*C_DFT_value1.x - W.y*C_DFT_value1.y; Cftemp1.y = W.x*C_DFT_value1.y + W.y*C_DFT_value1.x; Dftemp1.x = W.x*D_DFT_value1.x - W.y*D_DFT_value1.y; Dftemp1.y = W.x*D_DFT_value1.y + W.y*D_DFT_value1.x; Aftemp2.x = W.x*A_DFT_value2.x - W.y*A_DFT_value2.y; Aftemp2.y = W.x*A_DFT_value2.y + W.y*A_DFT_value2.x; Bftemp2.x = W.x*B_DFT_value2.x - W.y*B_DFT_value2.y; Bftemp2.y = W.x*B_DFT_value2.y + W.y*B_DFT_value2.x; Cftemp2.x = W.x*C_DFT_value2.x - W.y*C_DFT_value2.y; Cftemp2.y = W.x*C_DFT_value2.y + W.y*C_DFT_value2.x; Dftemp2.x = W.x*D_DFT_value2.x - W.y*D_DFT_value2.y; Dftemp2.y = W.x*D_DFT_value2.y + W.y*D_DFT_value2.x; A_DFT_value1.x = Aftemp1.x + parity*shfl_xor(&Aftemp1.x,PoT); A_DFT_value1.y = Aftemp1.y + parity*shfl_xor(&Aftemp1.y,PoT); B_DFT_value1.x = Bftemp1.x + parity*shfl_xor(&Bftemp1.x,PoT); B_DFT_value1.y = Bftemp1.y + parity*shfl_xor(&Bftemp1.y,PoT); C_DFT_value1.x = Cftemp1.x + parity*shfl_xor(&Cftemp1.x,PoT); C_DFT_value1.y = Cftemp1.y + parity*shfl_xor(&Cftemp1.y,PoT); D_DFT_value1.x = Dftemp1.x + parity*shfl_xor(&Dftemp1.x,PoT); D_DFT_value1.y = Dftemp1.y + parity*shfl_xor(&Dftemp1.y,PoT); A_DFT_value2.x = Aftemp2.x + parity*shfl_xor(&Aftemp2.x,PoT); A_DFT_value2.y = Aftemp2.y + parity*shfl_xor(&Aftemp2.y,PoT); B_DFT_value2.x = Bftemp2.x + parity*shfl_xor(&Bftemp2.x,PoT); B_DFT_value2.y = Bftemp2.y + parity*shfl_xor(&Bftemp2.y,PoT); C_DFT_value2.x = Cftemp2.x + parity*shfl_xor(&Cftemp2.x,PoT); C_DFT_value2.y = Cftemp2.y + parity*shfl_xor(&Cftemp2.y,PoT); D_DFT_value2.x = Dftemp2.x + parity*shfl_xor(&Dftemp2.x,PoT); D_DFT_value2.y = Dftemp2.y + parity*shfl_xor(&Dftemp2.y,PoT); PoT=PoT<<1; PoTp1=PoTp1<<1; } itemp = local_id + (warp_id<<2)*WARP; s_input1[itemp] = 
A_DFT_value1; s_input1[itemp + WARP] = B_DFT_value1; s_input1[itemp + 2*WARP] = C_DFT_value1; s_input1[itemp + 3*WARP] = D_DFT_value1; s_input2[itemp] = A_DFT_value2; s_input2[itemp + WARP] = B_DFT_value2; s_input2[itemp + 2*WARP] = C_DFT_value2; s_input2[itemp + 3*WARP] = D_DFT_value2; for(q=5;q<(const_params::fft_exp-1);q++){ __syncthreads(); m_param = threadIdx.x & (PoT - 1); j=threadIdx.x>>q; W=Get_W_value_inverse(PoTp1,m_param); A_read_index=j*(PoTp1<<1) + m_param; B_read_index=j*(PoTp1<<1) + m_param + PoT; C_read_index=j*(PoTp1<<1) + m_param + PoTp1; D_read_index=j*(PoTp1<<1) + m_param + 3*PoT; Aftemp1 = s_input1[A_read_index]; Bftemp1 = s_input1[B_read_index]; A_DFT_value1.x=Aftemp1.x + W.x*Bftemp1.x - W.y*Bftemp1.y; A_DFT_value1.y=Aftemp1.y + W.x*Bftemp1.y + W.y*Bftemp1.x; B_DFT_value1.x=Aftemp1.x - W.x*Bftemp1.x + W.y*Bftemp1.y; B_DFT_value1.y=Aftemp1.y - W.x*Bftemp1.y - W.y*Bftemp1.x; Aftemp2 = s_input2[A_read_index]; Bftemp2 = s_input2[B_read_index]; A_DFT_value2.x=Aftemp2.x + W.x*Bftemp2.x - W.y*Bftemp2.y; A_DFT_value2.y=Aftemp2.y + W.x*Bftemp2.y + W.y*Bftemp2.x; B_DFT_value2.x=Aftemp2.x - W.x*Bftemp2.x + W.y*Bftemp2.y; B_DFT_value2.y=Aftemp2.y - W.x*Bftemp2.y - W.y*Bftemp2.x; Cftemp1 = s_input1[C_read_index]; Dftemp1 = s_input1[D_read_index]; C_DFT_value1.x=Cftemp1.x + W.x*Dftemp1.x - W.y*Dftemp1.y; C_DFT_value1.y=Cftemp1.y + W.x*Dftemp1.y + W.y*Dftemp1.x; D_DFT_value1.x=Cftemp1.x - W.x*Dftemp1.x + W.y*Dftemp1.y; D_DFT_value1.y=Cftemp1.y - W.x*Dftemp1.y - W.y*Dftemp1.x; Cftemp2 = s_input2[C_read_index]; Dftemp2 = s_input2[D_read_index]; C_DFT_value2.x=Cftemp2.x + W.x*Dftemp2.x - W.y*Dftemp2.y; C_DFT_value2.y=Cftemp2.y + W.x*Dftemp2.y + W.y*Dftemp2.x; D_DFT_value2.x=Cftemp2.x - W.x*Dftemp2.x + W.y*Dftemp2.y; D_DFT_value2.y=Cftemp2.y - W.x*Dftemp2.y - W.y*Dftemp2.x; s_input1[A_read_index]=A_DFT_value1; s_input1[B_read_index]=B_DFT_value1; s_input1[C_read_index]=C_DFT_value1; s_input1[D_read_index]=D_DFT_value1; s_input2[A_read_index]=A_DFT_value2; s_input2[B_read_index]=B_DFT_value2; s_input2[C_read_index]=C_DFT_value2; s_input2[D_read_index]=D_DFT_value2; PoT=PoT<<1; PoTp1=PoTp1<<1; } //last iteration __syncthreads(); m_param = threadIdx.x; W=Get_W_value_inverse(PoTp1,m_param); A_read_index = m_param; B_read_index = m_param + PoT; C_read_index = m_param + (PoT>>1); D_read_index = m_param + 3*(PoT>>1); Aftemp1 = s_input1[A_read_index]; Bftemp1 = s_input1[B_read_index]; A_DFT_value1.x=Aftemp1.x + W.x*Bftemp1.x - W.y*Bftemp1.y; A_DFT_value1.y=Aftemp1.y + W.x*Bftemp1.y + W.y*Bftemp1.x; B_DFT_value1.x=Aftemp1.x - W.x*Bftemp1.x + W.y*Bftemp1.y; B_DFT_value1.y=Aftemp1.y - W.x*Bftemp1.y - W.y*Bftemp1.x; Aftemp2 = s_input2[A_read_index]; Bftemp2 = s_input2[B_read_index]; A_DFT_value2.x=Aftemp2.x + W.x*Bftemp2.x - W.y*Bftemp2.y; A_DFT_value2.y=Aftemp2.y + W.x*Bftemp2.y + W.y*Bftemp2.x; B_DFT_value2.x=Aftemp2.x - W.x*Bftemp2.x + W.y*Bftemp2.y; B_DFT_value2.y=Aftemp2.y - W.x*Bftemp2.y - W.y*Bftemp2.x; Cftemp1 = s_input1[C_read_index]; Dftemp1 = s_input1[D_read_index]; C_DFT_value1.x=Cftemp1.x - W.y*Dftemp1.x - W.x*Dftemp1.y; C_DFT_value1.y=Cftemp1.y - W.y*Dftemp1.y + W.x*Dftemp1.x; D_DFT_value1.x=Cftemp1.x + W.y*Dftemp1.x + W.x*Dftemp1.y; D_DFT_value1.y=Cftemp1.y + W.y*Dftemp1.y - W.x*Dftemp1.x; Cftemp2 = s_input2[C_read_index]; Dftemp2 = s_input2[D_read_index]; C_DFT_value2.x=Cftemp2.x - W.y*Dftemp2.x - W.x*Dftemp2.y; C_DFT_value2.y=Cftemp2.y - W.y*Dftemp2.y + W.x*Dftemp2.x; D_DFT_value2.x=Cftemp2.x + W.y*Dftemp2.x + W.x*Dftemp2.y; D_DFT_value2.y=Cftemp2.y + W.y*Dftemp2.y - W.x*Dftemp2.x; 
	s_input1[A_read_index]=A_DFT_value1;
	s_input1[B_read_index]=B_DFT_value1;
	s_input1[C_read_index]=C_DFT_value1;
	s_input1[D_read_index]=D_DFT_value1;
	s_input2[A_read_index]=A_DFT_value2;
	s_input2[B_read_index]=B_DFT_value2;
	s_input2[C_read_index]=C_DFT_value2;
	s_input2[D_read_index]=D_DFT_value2;
	__syncthreads();
}

// Forward FFT of one fft_length segment per thread block; each thread owns four elements.
template<class const_params>
__global__ void k_customFFT_GPU_forward(float2 *d_input, float2* d_output) {
	extern __shared__ float2 s_input[];
	s_input[threadIdx.x] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length];
	s_input[threadIdx.x + const_params::fft_length_quarter] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_quarter];
	s_input[threadIdx.x + const_params::fft_length_half] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_half];
	s_input[threadIdx.x + const_params::fft_length_three_quarters] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_three_quarters];
	__syncthreads();
	CT_DIF_FFT_4way<const_params>(s_input);
	__syncthreads();
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length] = s_input[threadIdx.x];
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_quarter] = s_input[threadIdx.x + const_params::fft_length_quarter];
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_half] = s_input[threadIdx.x + const_params::fft_length_half];
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_three_quarters] = s_input[threadIdx.x + const_params::fft_length_three_quarters];
}

// Loads one overlapped segment into shared memory, zero-padding samples that fall
// outside the input signal.
template<class const_params>
__device__ __inline__ void prepare_signal_4elem(float2* s_signal, float2 const* __restrict__ d_input_signal, int signal_length, int useful_part_size, int offset) {
	// Position of this thread's first sample in the input; the segment starts
	// 'offset' samples before its useful part so the aliased region can be discarded.
	int pos = blockIdx.x*useful_part_size + threadIdx.x - offset;
	
	s_signal[threadIdx.x].x = 0; s_signal[threadIdx.x].y = 0;
	s_signal[threadIdx.x + const_params::fft_length_quarter].x = 0; s_signal[threadIdx.x + const_params::fft_length_quarter].y = 0;
	s_signal[threadIdx.x + const_params::fft_length_half].x = 0; s_signal[threadIdx.x + const_params::fft_length_half].y = 0;
	s_signal[threadIdx.x + const_params::fft_length_three_quarters].x = 0; s_signal[threadIdx.x + const_params::fft_length_three_quarters].y = 0;
	
	if( pos>=0 && pos<signal_length )
		s_signal[threadIdx.x] = d_input_signal[pos];
	if( (pos + const_params::fft_length_quarter)>=0 && (pos + const_params::fft_length_quarter)<signal_length )
		s_signal[threadIdx.x + const_params::fft_length_quarter] = d_input_signal[pos + const_params::fft_length_quarter];
	if( (pos + const_params::fft_length_half)>=0 && (pos + const_params::fft_length_half)<signal_length )
		s_signal[threadIdx.x + const_params::fft_length_half] = d_input_signal[pos + const_params::fft_length_half];
	if( (pos + const_params::fft_length_three_quarters)>=0 && (pos + const_params::fft_length_three_quarters)<signal_length )
		s_signal[threadIdx.x + const_params::fft_length_three_quarters] = d_input_signal[pos + const_params::fft_length_three_quarters];
}

template<class const_params>
__global__ void k_GPU_conv_OLS_via_customFFT( float2 const* __restrict__ d_input_signal, float2 *d_output_plane, float2 const* __restrict__ d_filters, int signal_length, int useful_part_size, int offset, int nConvolutions, int nFilters) {
	extern __shared__ float2 s_input_1[];
	float2 r_filter_1[4];
	float2 signal[4];
	int pos, t;
	
	// Loading signal segment
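	// One overlap-and-save step follows: FFT the segment once, multiply it by each
	// pre-FFTed filter, inverse FFT, and keep only the alias-free samples.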
prepare_signal_4elem<const_params>(s_input_1, d_input_signal, signal_length, useful_part_size, offset); offset = ((const_params::fft_length - useful_part_size + 1)>>1); // Forward FFT on input signal CT_DIF_FFT_4way<const_params>(s_input_1); // Storing FFTed signal for reuse signal[0]=s_input_1[threadIdx.x]; signal[1]=s_input_1[threadIdx.x + const_params::fft_length_quarter]; signal[2]=s_input_1[threadIdx.x + const_params::fft_length_half]; signal[3]=s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; for(t=0; t<nFilters; t++){ // Loading filters pos = t*const_params::fft_length + threadIdx.x; r_filter_1[0]=__ldg(&d_filters[pos]); r_filter_1[1]=__ldg(&d_filters[pos + const_params::fft_length_quarter]); r_filter_1[2]=__ldg(&d_filters[pos + const_params::fft_length_half]); r_filter_1[3]=__ldg(&d_filters[pos + const_params::fft_length_three_quarters]); // Convolution (complex multiplication) s_input_1[threadIdx.x].x = (r_filter_1[0].x*signal[0].x - r_filter_1[0].y*signal[0].y)/((float) const_params::fft_length); s_input_1[threadIdx.x].y = (r_filter_1[0].x*signal[0].y + r_filter_1[0].y*signal[0].x)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_quarter].x = (r_filter_1[1].x*signal[1].x - r_filter_1[1].y*signal[1].y)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_quarter].y = (r_filter_1[1].x*signal[1].y + r_filter_1[1].y*signal[1].x)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_half].x = (r_filter_1[2].x*signal[2].x - r_filter_1[2].y*signal[2].y)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_half].y = (r_filter_1[2].x*signal[2].y + r_filter_1[2].y*signal[2].x)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_three_quarters].x = (r_filter_1[3].x*signal[3].x - r_filter_1[3].y*signal[3].y)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_three_quarters].y = (r_filter_1[3].x*signal[3].y + r_filter_1[3].y*signal[3].x)/((float) const_params::fft_length); __syncthreads(); //----------> Inverse FFT CT_DIT_FFT_4way<const_params>(s_input_1); //----------< // Writing out the clean part of the segment pos = t*useful_part_size*nConvolutions + blockIdx.x*useful_part_size + threadIdx.x; if( threadIdx.x>=offset && threadIdx.x<(useful_part_size+offset) ) { d_output_plane[pos - offset] = s_input_1[threadIdx.x]; } if( (threadIdx.x + const_params::fft_length_quarter)>=offset && (threadIdx.x + const_params::fft_length_quarter)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_quarter - offset] = s_input_1[threadIdx.x + const_params::fft_length_quarter]; } if( (threadIdx.x + const_params::fft_length_half)>=offset && (threadIdx.x + const_params::fft_length_half)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_half - offset] = s_input_1[threadIdx.x + const_params::fft_length_half]; } if( (threadIdx.x + const_params::fft_length_three_quarters)>=offset && (threadIdx.x + const_params::fft_length_three_quarters)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_three_quarters - offset] = s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; } __syncthreads(); } } template<class const_params> __global__ void k_GPU_conv_OLS_via_customFFT_2filters( float2 const* __restrict__ d_input_signal, float2 *d_output_plane, float2 const* __restrict__ d_filters, int signal_length, int useful_part_size, int 
offset, int nConvolutions, int nFilters) { __shared__ float2 s_input_1[const_params::fft_length]; __shared__ float2 s_input_2[const_params::fft_length]; float2 r_filter_1[4]; float2 r_filter_2[4]; float2 signal[4]; int pos, t; // Loading data prepare_signal_4elem<const_params>(s_input_1, d_input_signal, signal_length, useful_part_size, offset); // Forward FFT on input signal CT_DIF_FFT_4way<const_params>(s_input_1); // Storing FFTed signal for reuse signal[0]=s_input_1[threadIdx.x]; signal[1]=s_input_1[threadIdx.x + const_params::fft_length_quarter]; signal[2]=s_input_1[threadIdx.x + const_params::fft_length_half]; signal[3]=s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; for(t=0; t<(nFilters>>1); t++){ // Loading filters pos = 2*t*const_params::fft_length + threadIdx.x; r_filter_1[0]=__ldg(&d_filters[pos]); r_filter_1[1]=__ldg(&d_filters[pos + const_params::fft_length_quarter]); r_filter_1[2]=__ldg(&d_filters[pos + const_params::fft_length_half]); r_filter_1[3]=__ldg(&d_filters[pos + const_params::fft_length_three_quarters]); r_filter_2[0]=__ldg(&d_filters[pos + const_params::fft_length]); r_filter_2[1]=__ldg(&d_filters[pos + const_params::fft_length + const_params::fft_length_quarter]); r_filter_2[2]=__ldg(&d_filters[pos + const_params::fft_length + const_params::fft_length_half]); r_filter_2[3]=__ldg(&d_filters[pos + const_params::fft_length + const_params::fft_length_three_quarters]); // Convolution (complex multiplication) s_input_1[threadIdx.x].x = r_filter_1[0].x*signal[0].x - r_filter_1[0].y*signal[0].y; s_input_1[threadIdx.x].y = r_filter_1[0].x*signal[0].y + r_filter_1[0].y*signal[0].x; s_input_1[threadIdx.x + const_params::fft_length_quarter].x = r_filter_1[1].x*signal[1].x - r_filter_1[1].y*signal[1].y; s_input_1[threadIdx.x + const_params::fft_length_quarter].y = r_filter_1[1].x*signal[1].y + r_filter_1[1].y*signal[1].x; s_input_1[threadIdx.x + const_params::fft_length_half].x = r_filter_1[2].x*signal[2].x - r_filter_1[2].y*signal[2].y; s_input_1[threadIdx.x + const_params::fft_length_half].y = r_filter_1[2].x*signal[2].y + r_filter_1[2].y*signal[2].x; s_input_1[threadIdx.x + const_params::fft_length_three_quarters].x = r_filter_1[3].x*signal[3].x - r_filter_1[3].y*signal[3].y; s_input_1[threadIdx.x + const_params::fft_length_three_quarters].y = r_filter_1[3].x*signal[3].y + r_filter_1[3].y*signal[3].x; s_input_2[threadIdx.x].x = r_filter_2[0].x*signal[0].x - r_filter_2[0].y*signal[0].y; s_input_2[threadIdx.x].y = r_filter_2[0].x*signal[0].y + r_filter_2[0].y*signal[0].x; s_input_2[threadIdx.x + const_params::fft_length_quarter].x = r_filter_2[1].x*signal[1].x - r_filter_2[1].y*signal[1].y; s_input_2[threadIdx.x + const_params::fft_length_quarter].y = r_filter_2[1].x*signal[1].y + r_filter_2[1].y*signal[1].x; s_input_2[threadIdx.x + const_params::fft_length_half].x = r_filter_2[2].x*signal[2].x - r_filter_2[2].y*signal[2].y; s_input_2[threadIdx.x + const_params::fft_length_half].y = r_filter_2[2].x*signal[2].y + r_filter_2[2].y*signal[2].x; s_input_2[threadIdx.x + const_params::fft_length_three_quarters].x = r_filter_2[3].x*signal[3].x - r_filter_2[3].y*signal[3].y; s_input_2[threadIdx.x + const_params::fft_length_three_quarters].y = r_filter_2[3].x*signal[3].y + r_filter_2[3].y*signal[3].x; __syncthreads(); //----------> Inverse FFT FFT_CT_DIT_4elem_2vertical_no_reorder<const_params>(s_input_1, s_input_2); //----------< // Writing out the clean part of the segment // First convolution pos = 2*t*useful_part_size*nConvolutions + blockIdx.x*useful_part_size + 
threadIdx.x; if( threadIdx.x>=offset && threadIdx.x<(useful_part_size+offset) ) { d_output_plane[pos - offset] = s_input_1[threadIdx.x]; } if( (threadIdx.x + const_params::fft_length_quarter)>=offset && (threadIdx.x + const_params::fft_length_quarter)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_quarter - offset] = s_input_1[threadIdx.x + const_params::fft_length_quarter]; } if( (threadIdx.x + const_params::fft_length_half)>=offset && (threadIdx.x + const_params::fft_length_half)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_half - offset] = s_input_1[threadIdx.x + const_params::fft_length_half]; } if( (threadIdx.x + const_params::fft_length_three_quarters)>=offset && (threadIdx.x + const_params::fft_length_three_quarters)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_three_quarters - offset] = s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; } // Second convolution pos = pos + useful_part_size*nConvolutions; if( threadIdx.x>=offset && threadIdx.x<(useful_part_size+offset) ) { d_output_plane[pos - offset] = s_input_2[threadIdx.x]; } if( (threadIdx.x + const_params::fft_length_quarter)>=offset && (threadIdx.x + const_params::fft_length_quarter)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_quarter - offset] = s_input_2[threadIdx.x + const_params::fft_length_quarter]; } if( (threadIdx.x + const_params::fft_length_half)>=offset && (threadIdx.x + const_params::fft_length_half)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_half - offset] = s_input_2[threadIdx.x + const_params::fft_length_half]; } if( (threadIdx.x + const_params::fft_length_three_quarters)>=offset && (threadIdx.x + const_params::fft_length_three_quarters)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_three_quarters - offset] = s_input_2[threadIdx.x + const_params::fft_length_three_quarters]; } __syncthreads(); } } //***************************************************************************** //***************************************************************************** //***************************************************************************** void CONV_init(){ //---------> Specific nVidia stuff hipDeviceSetCacheConfig(hipFuncCachePreferShared); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); } void forwardCustomFFT(float2 *d_filters, int FFT_size, int nFilters){ dim3 gridSize(nFilters, 1, 1); dim3 blockSize(FFT_size/4, 1, 1); switch(FFT_size) { case 256: hipLaunchKernelGGL(( k_customFFT_GPU_forward<FFT_256>), dim3(gridSize), dim3(blockSize), FFT_size*8, 0, d_filters, d_filters); break; case 512: hipLaunchKernelGGL(( k_customFFT_GPU_forward<FFT_512>), dim3(gridSize), dim3(blockSize), FFT_size*8, 0, d_filters, d_filters); break; case 1024: hipLaunchKernelGGL(( k_customFFT_GPU_forward<FFT_1024>), dim3(gridSize), dim3(blockSize), FFT_size*8, 0, d_filters, d_filters); break; case 2048: hipLaunchKernelGGL(( k_customFFT_GPU_forward<FFT_2048>), dim3(gridSize), dim3(blockSize), FFT_size*8, 0, d_filters, d_filters); break; case 4096: hipLaunchKernelGGL(( k_customFFT_GPU_forward<FFT_4096>), dim3(gridSize), dim3(blockSize), FFT_size*8, 0, d_filters, d_filters); break; default : break; } } void conv_OLS_customFFT(float2 *d_input_signal, float2 *d_output_plane, float2 *d_filters, int signal_length, int convolution_length, int useful_part_size, int offset, int nConvolutions, int nFilters){ dim3 gridSize(nConvolutions, 1, 1); dim3 
blockSize(convolution_length/4, 1, 1); switch(convolution_length) { case 256: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT<FFT_256>), dim3(gridSize), dim3(blockSize), convolution_length*8, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 512: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT<FFT_512>), dim3(gridSize), dim3(blockSize), convolution_length*8, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 1024: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT<FFT_1024>), dim3(gridSize), dim3(blockSize), convolution_length*8, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 2048: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT<FFT_2048>), dim3(gridSize), dim3(blockSize), convolution_length*8, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 4096: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT<FFT_4096>), dim3(gridSize), dim3(blockSize), convolution_length*8, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; default : break; } } void conv_OLS_customFFT_2filters(float2 *d_input_signal, float2 *d_output_plane, float2 *d_filters, int signal_length, int convolution_length, int useful_part_size, int offset, int nConvolutions, int nFilters){ dim3 gridSize(nConvolutions, 1, 1); dim3 blockSize(convolution_length/4, 1, 1); switch(convolution_length) { case 256: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT_2filters<FFT_256>), dim3(gridSize), dim3(blockSize), 0, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 512: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT_2filters<FFT_512>), dim3(gridSize), dim3(blockSize), 0, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 1024: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT_2filters<FFT_1024>), dim3(gridSize), dim3(blockSize), 0, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 2048: hipLaunchKernelGGL(( k_GPU_conv_OLS_via_customFFT_2filters<FFT_2048>), dim3(gridSize), dim3(blockSize), 0, 0, d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; default : break; } } void convolution_via_customFFT_benchmark(float2 *d_input_signal, float2 *d_output_plane, float2 *d_filters, int signal_length, int convolution_length, int useful_part_size, int offset, int nConvolutions, int nFilters, double *CONV_time, int kernel_type){ GpuTimer timer; // --------> Preparing filters for convolution forwardCustomFFT(d_filters, convolution_length, nFilters); // -----------------------------------------------> // --------> Measured part (Convolution) timer.Start(); CONV_init(); if(kernel_type==1){ conv_OLS_customFFT(d_input_signal, d_output_plane, d_filters, signal_length, convolution_length, useful_part_size, offset, nConvolutions, nFilters); } if(kernel_type==2){ conv_OLS_customFFT_2filters(d_input_signal, d_output_plane, d_filters, signal_length, convolution_length, useful_part_size, offset, nConvolutions, nFilters); } timer.Stop(); *CONV_time += timer.Elapsed(); // --------> Measured part (Convolution) // 
----------------------------------------------->
}

//*****************************************************************************
//*****************************************************************************
//*****************************************************************************

int GPU_convolution_OLS_customFFT(float2 *h_input_signal, float2 *h_output_plane, float2 *h_filters, int signal_length, int convolution_length, int filter_length, int past_filter_samples, int nFilters, int nRuns, int kernel_type, double *execution_time){
	//---------> Initial nVidia stuff
	int devCount;
	size_t free_mem, total_mem;
	checkCudaErrors(hipGetDeviceCount(&devCount));
	if(device<devCount){
		checkCudaErrors(hipSetDevice(device));
	}
	else {
		printf("ERROR! Selected device is not available\n");
		return(1);
	}
	hipMemGetInfo(&free_mem,&total_mem);
	
	//---------> Time measurements
	double transfer_in, transfer_out, CONV_kFFT_time, total_CONV_kFFT_time;
	transfer_in=0.0; transfer_out=0.0; CONV_kFFT_time=0.0; total_CONV_kFFT_time=0;
	GpuTimer timer;
	
	//----> Calculating variables for overlap-and-save
	// Each segment of length convolution_length yields useful_part_size alias-free samples.
	int offset = past_filter_samples;
	int useful_part_size = convolution_length - filter_length + 1;
	int nConvolutions = (signal_length + useful_part_size - 1)/useful_part_size;
	if(DEBUG) printf("signal_length: %d; filter_length: %d; segment_size: %d;\n", signal_length, filter_length, convolution_length);
	if(DEBUG) printf("offset: %d; nConvolutions: %d; useful_part_size: %d;\n", offset, nConvolutions, useful_part_size);
	
	//---------> Defining variables and their sizes
	float2 *d_output_plane;
	float2 *d_input_signal;
	float2 *d_filters;
	size_t input_size = signal_length;
	size_t output_size = nConvolutions*useful_part_size*nFilters;
	size_t template_size = convolution_length*nFilters;
	
	//---------> Checking memory
	float free_memory = (float) free_mem/(1024.0*1024.0);
	float memory_required = (( ((float) input_size) + ((float) output_size) + ((float) template_size))*sizeof(float2))/(1024.0*1024.0);
	if(DEBUG) printf("\n");
	if(DEBUG) printf("DEBUG:\n");
	if(DEBUG) printf(" Device has %0.3f MB of total memory, of which %0.3f MB is available. Memory required %0.3f MB\n", (float) total_mem/(1024.0*1024.0), free_memory, memory_required);
	if(DEBUG) printf(" d_input_signal: %0.3f MB\n", ((float) input_size*sizeof(float2))/(1024.0*1024.0) );
	if(DEBUG) printf(" d_filters: %0.3f MB\n", ((float) template_size*sizeof(float2))/(1024.0*1024.0) );
	if(DEBUG) printf(" d_output_plane: %0.3f MB\n", ((float) output_size*sizeof(float2))/(1024.0*1024.0) );
	if(memory_required>free_memory) {
		printf("\n \n Array is too big for the device! \n \n");
		return(-3);
	}
	
	//---------> Memory allocation
	if (VERBOSE) printf("Device memory allocation...: \t\t");
	timer.Start();
	checkCudaErrors(hipMalloc((void **) &d_input_signal, sizeof(float2)*input_size));
	checkCudaErrors(hipMalloc((void **) &d_output_plane, sizeof(float2)*output_size));
	checkCudaErrors(hipMalloc((void **) &d_filters, sizeof(float2)*template_size));
	timer.Stop();
	if (VERBOSE) printf("done in %g ms.\n", timer.Elapsed());
	
	//------------------------------------------------------------------------------
	//---------> CONV calculation
	
	//-----> Copy chunk of input data to a device
	if (VERBOSE) printf("Transferring data into device memory...: \t\t");
	timer.Start();
	checkCudaErrors(hipMemcpy(d_input_signal, h_input_signal, input_size*sizeof(float2), hipMemcpyHostToDevice));
	checkCudaErrors(hipMemcpy(d_filters, h_filters, template_size*sizeof(float2), hipMemcpyHostToDevice));
	timer.Stop();
	transfer_in+=timer.Elapsed();
	if (VERBOSE) printf("done in %g ms.\n", timer.Elapsed());
	
	if (DEBUG) printf("Calculating convolution via kFFT...: \t\t");
	total_CONV_kFFT_time = 0;
	// Inputs are re-uploaded every run because forwardCustomFFT transforms d_filters in place.
	for(int f=0; f<nRuns; f++){
		checkCudaErrors(hipMemcpy(d_input_signal, h_input_signal, input_size*sizeof(float2), hipMemcpyHostToDevice));
		checkCudaErrors(hipMemcpy(d_filters, h_filters, template_size*sizeof(float2), hipMemcpyHostToDevice));
		convolution_via_customFFT_benchmark(d_input_signal, d_output_plane, d_filters, signal_length, convolution_length, useful_part_size, offset, nConvolutions, nFilters, &total_CONV_kFFT_time, kernel_type);
		checkCudaErrors(hipGetLastError());
	}
	CONV_kFFT_time = total_CONV_kFFT_time/nRuns;
	if (DEBUG) printf("done in %g ms.\n", CONV_kFFT_time);
	*execution_time = CONV_kFFT_time;
	
	//-----> Copy chunk of output data to host
	if (DEBUG) printf("Transferring data to host...: \t\t");
	timer.Start();
	checkCudaErrors(hipMemcpy(h_output_plane, d_output_plane, output_size*sizeof(float2), hipMemcpyDeviceToHost));
	timer.Stop();
	transfer_out+=timer.Elapsed();
	if (DEBUG) printf("done in %g ms.\n", timer.Elapsed());
	
	//---------> error check -----
	checkCudaErrors(hipGetLastError());
	
	//---------> Freeing allocated resources
	checkCudaErrors(hipFree(d_input_signal));
	checkCudaErrors(hipFree(d_output_plane));
	checkCudaErrors(hipFree(d_filters));
	
	return(0);
}
7c3b448a699470bcf3723a086345f8ee9147c44d.cu
//********************************************************************************************
//* This is a GPU implementation of an Overlap-and-save method for calculating convolution.
//* Copyright (C) 2019 Adámek Karel
//*
//* Authors: Karel Adamek ( ORCID:0000-0003-2797-0595; https://github.com/KAdamek ), Wesley Armour ( ORCID:0000-0003-1756-3064 ), Sofia Dimoudi
//********************************************************************************************

#include <iostream>
#include <fstream>
#include <iomanip>
#include <cufft.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "debug.h"
#include "timer.h"
#include "utils_cuda.h"
#include "params.h"

#define WARP 32
//#define TESTING

int device=DEVICEID;

class FFT_Params {
public:
	static const int fft_exp = -1;
	static const int fft_length = -1;
	static const int warp = 32;
};

class FFT_256 : public FFT_Params {
public:
	static const int fft_exp = 8;
	static const int fft_length = 256;
	static const int fft_length_quarter = 64;
	static const int fft_length_half = 128;
	static const int fft_length_three_quarters = 192;
};

class FFT_512 : public FFT_Params {
public:
	static const int fft_exp = 9;
	static const int fft_length = 512;
	static const int fft_length_quarter = 128;
	static const int fft_length_half = 256;
	static const int fft_length_three_quarters = 384;
};

class FFT_1024 : public FFT_Params {
public:
	static const int fft_exp = 10;
	static const int fft_length = 1024;
	static const int fft_length_quarter = 256;
	static const int fft_length_half = 512;
	static const int fft_length_three_quarters = 768;
};

class FFT_2048 : public FFT_Params {
public:
	static const int fft_exp = 11;
	static const int fft_length = 2048;
	static const int fft_length_quarter = 512;
	static const int fft_length_half = 1024;
	static const int fft_length_three_quarters = 1536;
};

class FFT_4096 : public FFT_Params {
public:
	static const int fft_exp = 12;
	static const int fft_length = 4096;
	static const int fft_length_quarter = 1024;
	static const int fft_length_half = 2048;
	static const int fft_length_three_quarters = 3072;
};

class FFT_ConstDirection {
public:
	static const int fft_direction = -1;
};

class FFT_forward : public FFT_ConstDirection {
public:
	static const int fft_direction = 0;
};

class FFT_inverse : public FFT_ConstDirection {
public:
	static const int fft_direction = 1;
};

// Twiddle factor W = exp(-2*pi*i*m/N) for the forward FFT
__device__ __inline__ float2 Get_W_value(int N, int m){
	float2 ctemp;
	sincosf( -6.283185308f*fdividef( (float) m, (float) N), &ctemp.y, &ctemp.x);
	return(ctemp);
}

// Twiddle factor W = exp(+2*pi*i*m/N) for the inverse FFT
__device__ __inline__ float2 Get_W_value_inverse(int N, int m){
	float2 ctemp;
	sincosf( 6.283185308f*fdividef( (float) m, (float) N), &ctemp.y, &ctemp.x);
	return(ctemp);
}

__device__ __inline__ float shfl_xor(float *value, int par){
	#if (CUDART_VERSION >= 9000)
	return(__shfl_xor_sync(0xffffffff, (*value), par));
	#else
	return(__shfl_xor((*value), par));
	#endif
}

template<class const_params>
__inline__ __device__ void CT_DIT_FFT_4way(float2 *s_input){
	float2 A_DFT_value, B_DFT_value, C_DFT_value, D_DFT_value;
	float2 W;
	float2 Aftemp, Bftemp, Cftemp, Dftemp;
	int local_id, warp_id;
	int j, m_param;
	int parity, itemp;
	int A_read_index, B_read_index, C_read_index, D_read_index;
	int PoT, PoTp1, q;
	
	local_id = threadIdx.x & (const_params::warp - 1);
	warp_id = threadIdx.x/const_params::warp;
	
	#ifdef TESTING
	int A_load_id, B_load_id, i, A_n, B_n;
	A_load_id = threadIdx.x;
	B_load_id = threadIdx.x + const_params::fft_length_quarter;
	A_n=threadIdx.x;
	B_n=threadIdx.x + const_params::fft_length_quarter;
	for(i=1;
i<const_params::fft_exp; i++) { A_n >>= 1; B_n >>= 1; A_load_id <<= 1; A_load_id |= A_n & 1; B_load_id <<= 1; B_load_id |= B_n & 1; } A_load_id &= const_params::fft_length-1; B_load_id &= const_params::fft_length-1; //-----> Scrambling input A_DFT_value=s_input[A_load_id]; B_DFT_value=s_input[A_load_id + 1]; C_DFT_value=s_input[B_load_id]; D_DFT_value=s_input[B_load_id + 1]; __syncthreads(); s_input[threadIdx.x] = A_DFT_value; s_input[threadIdx.x + const_params::fft_length_half] = B_DFT_value; s_input[threadIdx.x + const_params::fft_length_quarter] = C_DFT_value; s_input[threadIdx.x + const_params::fft_length_three_quarters] = D_DFT_value; __syncthreads(); #endif //-----> FFT //--> PoT=1; PoTp1=2; //--> First iteration itemp=local_id&1; parity=(1-itemp*2); A_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp]; B_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp + const_params::warp]; C_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp + 2*const_params::warp]; D_DFT_value=s_input[local_id + (warp_id<<2)*const_params::warp + 3*const_params::warp]; __syncthreads(); A_DFT_value.x=parity*A_DFT_value.x + shfl_xor(&A_DFT_value.x, 1); A_DFT_value.y=parity*A_DFT_value.y + shfl_xor(&A_DFT_value.y, 1); B_DFT_value.x=parity*B_DFT_value.x + shfl_xor(&B_DFT_value.x, 1); B_DFT_value.y=parity*B_DFT_value.y + shfl_xor(&B_DFT_value.y, 1); C_DFT_value.x=parity*C_DFT_value.x + shfl_xor(&C_DFT_value.x, 1); C_DFT_value.y=parity*C_DFT_value.y + shfl_xor(&C_DFT_value.y, 1); D_DFT_value.x=parity*D_DFT_value.x + shfl_xor(&D_DFT_value.x, 1); D_DFT_value.y=parity*D_DFT_value.y + shfl_xor(&D_DFT_value.y, 1); //--> Second through Fifth iteration (no synchronization) PoT=2; PoTp1=4; for(q=1;q<5;q++){ m_param = (local_id & (PoTp1 - 1)); itemp = m_param>>q; parity=((itemp<<1)-1); W = Get_W_value_inverse(PoTp1, itemp*m_param); Aftemp.x = W.x*A_DFT_value.x - W.y*A_DFT_value.y; Aftemp.y = W.x*A_DFT_value.y + W.y*A_DFT_value.x; Bftemp.x = W.x*B_DFT_value.x - W.y*B_DFT_value.y; Bftemp.y = W.x*B_DFT_value.y + W.y*B_DFT_value.x; Cftemp.x = W.x*C_DFT_value.x - W.y*C_DFT_value.y; Cftemp.y = W.x*C_DFT_value.y + W.y*C_DFT_value.x; Dftemp.x = W.x*D_DFT_value.x - W.y*D_DFT_value.y; Dftemp.y = W.x*D_DFT_value.y + W.y*D_DFT_value.x; A_DFT_value.x = Aftemp.x + parity*shfl_xor(&Aftemp.x,PoT); A_DFT_value.y = Aftemp.y + parity*shfl_xor(&Aftemp.y,PoT); B_DFT_value.x = Bftemp.x + parity*shfl_xor(&Bftemp.x,PoT); B_DFT_value.y = Bftemp.y + parity*shfl_xor(&Bftemp.y,PoT); C_DFT_value.x = Cftemp.x + parity*shfl_xor(&Cftemp.x,PoT); C_DFT_value.y = Cftemp.y + parity*shfl_xor(&Cftemp.y,PoT); D_DFT_value.x = Dftemp.x + parity*shfl_xor(&Dftemp.x,PoT); D_DFT_value.y = Dftemp.y + parity*shfl_xor(&Dftemp.y,PoT); PoT=PoT<<1; PoTp1=PoTp1<<1; } itemp = local_id + (warp_id<<2)*const_params::warp; s_input[itemp] = A_DFT_value; s_input[itemp + const_params::warp] = B_DFT_value; s_input[itemp + 2*const_params::warp] = C_DFT_value; s_input[itemp + 3*const_params::warp] = D_DFT_value; for(q=5;q<(const_params::fft_exp-1);q++){ __syncthreads(); m_param = threadIdx.x & (PoT - 1); j=threadIdx.x>>q; W=Get_W_value_inverse(PoTp1,m_param); A_read_index=j*(PoTp1<<1) + m_param; B_read_index=j*(PoTp1<<1) + m_param + PoT; C_read_index=j*(PoTp1<<1) + m_param + PoTp1; D_read_index=j*(PoTp1<<1) + m_param + 3*PoT; Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; A_DFT_value.x=Aftemp.x + W.x*Bftemp.x - W.y*Bftemp.y; A_DFT_value.y=Aftemp.y + W.x*Bftemp.y + W.y*Bftemp.x; B_DFT_value.x=Aftemp.x - W.x*Bftemp.x + W.y*Bftemp.y; 
B_DFT_value.y=Aftemp.y - W.x*Bftemp.y - W.y*Bftemp.x; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; C_DFT_value.x=Cftemp.x + W.x*Dftemp.x - W.y*Dftemp.y; C_DFT_value.y=Cftemp.y + W.x*Dftemp.y + W.y*Dftemp.x; D_DFT_value.x=Cftemp.x - W.x*Dftemp.x + W.y*Dftemp.y; D_DFT_value.y=Cftemp.y - W.x*Dftemp.y - W.y*Dftemp.x; s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; PoT=PoT<<1; PoTp1=PoTp1<<1; } //last iteration __syncthreads(); m_param = threadIdx.x; W=Get_W_value_inverse(PoTp1,m_param); A_read_index = m_param; B_read_index = m_param + PoT; C_read_index = m_param + (PoT>>1); D_read_index = m_param + 3*(PoT>>1); Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; A_DFT_value.x=Aftemp.x + W.x*Bftemp.x - W.y*Bftemp.y; A_DFT_value.y=Aftemp.y + W.x*Bftemp.y + W.y*Bftemp.x; B_DFT_value.x=Aftemp.x - W.x*Bftemp.x + W.y*Bftemp.y; B_DFT_value.y=Aftemp.y - W.x*Bftemp.y - W.y*Bftemp.x; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; C_DFT_value.x=Cftemp.x - W.y*Dftemp.x - W.x*Dftemp.y; C_DFT_value.y=Cftemp.y - W.y*Dftemp.y + W.x*Dftemp.x; D_DFT_value.x=Cftemp.x + W.y*Dftemp.x + W.x*Dftemp.y; D_DFT_value.y=Cftemp.y + W.y*Dftemp.y - W.x*Dftemp.x; s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; __syncthreads(); } template<class const_params> __inline__ __device__ void CT_DIF_FFT_4way(float2 *s_input){ float2 A_DFT_value, B_DFT_value, C_DFT_value, D_DFT_value; float2 W; float2 Aftemp, Bftemp, Cftemp, Dftemp; int local_id, warp_id; int j, m_param, parity; int A_read_index, B_read_index, C_read_index, D_read_index; int PoT, PoTm1, q; local_id = threadIdx.x & (WARP - 1); warp_id = threadIdx.x/WARP; //-----> FFT //--> PoTm1 = const_params::fft_length_half; PoT = const_params::fft_length; //Highest iteration m_param = threadIdx.x; j=0; A_read_index = m_param; B_read_index = m_param + PoTm1; C_read_index = m_param + (PoTm1>>1); D_read_index = m_param + 3*(PoTm1>>1); W=Get_W_value(PoT, m_param); Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; A_DFT_value.x = Aftemp.x + Bftemp.x; A_DFT_value.y = Aftemp.y + Bftemp.y; B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y); B_DFT_value.y = W.x*(Aftemp.y - Bftemp.y) + W.y*(Aftemp.x - Bftemp.x); C_DFT_value.x = Cftemp.x + Dftemp.x; C_DFT_value.y = Cftemp.y + Dftemp.y; D_DFT_value.x = W.y*(Cftemp.x - Dftemp.x) + W.x*(Cftemp.y - Dftemp.y); D_DFT_value.y = W.y*(Cftemp.y - Dftemp.y) - W.x*(Cftemp.x - Dftemp.x); s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; PoT=PoT>>1; PoTm1=PoTm1>>1; for(q=(const_params::fft_exp-2);q>4;q--){ __syncthreads(); m_param = threadIdx.x & (PoTm1 - 1); j=threadIdx.x>>q; W=Get_W_value(PoT, m_param); A_read_index=j*(PoT<<1) + m_param; B_read_index=j*(PoT<<1) + m_param + PoTm1; C_read_index=j*(PoT<<1) + m_param + PoT; D_read_index=j*(PoT<<1) + m_param + 3*PoTm1; Aftemp = s_input[A_read_index]; Bftemp = s_input[B_read_index]; Cftemp = s_input[C_read_index]; Dftemp = s_input[D_read_index]; A_DFT_value.x = Aftemp.x + Bftemp.x; A_DFT_value.y = Aftemp.y + Bftemp.y; C_DFT_value.x = Cftemp.x + Dftemp.x; C_DFT_value.y = Cftemp.y + Dftemp.y; B_DFT_value.x = W.x*(Aftemp.x - Bftemp.x) - W.y*(Aftemp.y - Bftemp.y); B_DFT_value.y = W.x*(Aftemp.y - 
Bftemp.y) + W.y*(Aftemp.x - Bftemp.x); D_DFT_value.x = W.x*(Cftemp.x - Dftemp.x) - W.y*(Cftemp.y - Dftemp.y); D_DFT_value.y = W.x*(Cftemp.y - Dftemp.y) + W.y*(Cftemp.x - Dftemp.x); s_input[A_read_index]=A_DFT_value; s_input[B_read_index]=B_DFT_value; s_input[C_read_index]=C_DFT_value; s_input[D_read_index]=D_DFT_value; PoT=PoT>>1; PoTm1=PoTm1>>1; } __syncthreads(); j = local_id + (warp_id<<2)*WARP; A_DFT_value = s_input[j]; B_DFT_value = s_input[j + WARP]; C_DFT_value = s_input[j + 2*WARP]; D_DFT_value = s_input[j + 3*WARP]; for(q=4;q>=0;q--){ m_param = (local_id & (PoT - 1)); j = m_param>>q; parity=(1-j*2); W = Get_W_value(PoT, j*(m_param-PoTm1)); Aftemp.x = parity*A_DFT_value.x + shfl_xor(&A_DFT_value.x, PoTm1); Aftemp.y = parity*A_DFT_value.y + shfl_xor(&A_DFT_value.y, PoTm1); Bftemp.x = parity*B_DFT_value.x + shfl_xor(&B_DFT_value.x, PoTm1); Bftemp.y = parity*B_DFT_value.y + shfl_xor(&B_DFT_value.y, PoTm1); Cftemp.x = parity*C_DFT_value.x + shfl_xor(&C_DFT_value.x, PoTm1); Cftemp.y = parity*C_DFT_value.y + shfl_xor(&C_DFT_value.y, PoTm1); Dftemp.x = parity*D_DFT_value.x + shfl_xor(&D_DFT_value.x, PoTm1); Dftemp.y = parity*D_DFT_value.y + shfl_xor(&D_DFT_value.y, PoTm1); A_DFT_value.x = W.x*Aftemp.x - W.y*Aftemp.y; A_DFT_value.y = W.x*Aftemp.y + W.y*Aftemp.x; B_DFT_value.x = W.x*Bftemp.x - W.y*Bftemp.y; B_DFT_value.y = W.x*Bftemp.y + W.y*Bftemp.x; C_DFT_value.x = W.x*Cftemp.x - W.y*Cftemp.y; C_DFT_value.y = W.x*Cftemp.y + W.y*Cftemp.x; D_DFT_value.x = W.x*Dftemp.x - W.y*Dftemp.y; D_DFT_value.y = W.x*Dftemp.y + W.y*Dftemp.x; PoT=PoT>>1; PoTm1=PoTm1>>1; } j = local_id + (warp_id<<2)*WARP; s_input[j] = A_DFT_value; s_input[j + WARP] = B_DFT_value; s_input[j + 2*WARP] = C_DFT_value; s_input[j + 3*WARP] = D_DFT_value; __syncthreads(); #ifdef TESTING __syncthreads(); int A_load_id, B_load_id, i, A_n, B_n; A_load_id = threadIdx.x; B_load_id = threadIdx.x + const_params::fft_length_quarter; A_n=threadIdx.x; B_n=threadIdx.x + const_params::fft_length_quarter; for(i=1; i<const_params::fft_exp; i++) { A_n >>= 1; B_n >>= 1; A_load_id <<= 1; A_load_id |= A_n & 1; B_load_id <<= 1; B_load_id |= B_n & 1; } A_load_id &= const_params::fft_length-1; B_load_id &= const_params::fft_length-1; //-----> Scrambling input A_DFT_value=s_input[A_load_id]; B_DFT_value=s_input[A_load_id + 1]; C_DFT_value=s_input[B_load_id]; D_DFT_value=s_input[B_load_id + 1]; __syncthreads(); s_input[threadIdx.x] = A_DFT_value; s_input[threadIdx.x + const_params::fft_length_half] = B_DFT_value; s_input[threadIdx.x + const_params::fft_length_quarter] = C_DFT_value; s_input[threadIdx.x + const_params::fft_length_three_quarters] = D_DFT_value; __syncthreads(); #endif } template<class const_params> __inline__ __device__ void FFT_CT_DIT_4elem_2vertical_no_reorder(float2 *s_input1, float2 *s_input2){ float2 A_DFT_value1, B_DFT_value1, C_DFT_value1, D_DFT_value1; float2 A_DFT_value2, B_DFT_value2, C_DFT_value2, D_DFT_value2; float2 W; float2 Aftemp1, Bftemp1, Cftemp1, Dftemp1; float2 Aftemp2, Bftemp2, Cftemp2, Dftemp2; int local_id, warp_id; int j, m_param; int parity, itemp; int A_read_index, B_read_index, C_read_index, D_read_index; int PoT, PoTp1, q; local_id = threadIdx.x & (WARP - 1); warp_id = threadIdx.x/WARP; //-----> FFT //--> PoT=1; PoTp1=2; //--> First iteration itemp=local_id&1; parity=(1-itemp*2); A_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP]; B_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP + WARP]; C_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP + 2*WARP]; D_DFT_value1=s_input1[local_id + (warp_id<<2)*WARP + 
3*WARP]; A_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP]; B_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP + WARP]; C_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP + 2*WARP]; D_DFT_value2=s_input2[local_id + (warp_id<<2)*WARP + 3*WARP]; __syncthreads(); A_DFT_value1.x=parity*A_DFT_value1.x + shfl_xor(&A_DFT_value1.x,1); A_DFT_value1.y=parity*A_DFT_value1.y + shfl_xor(&A_DFT_value1.y,1); B_DFT_value1.x=parity*B_DFT_value1.x + shfl_xor(&B_DFT_value1.x,1); B_DFT_value1.y=parity*B_DFT_value1.y + shfl_xor(&B_DFT_value1.y,1); C_DFT_value1.x=parity*C_DFT_value1.x + shfl_xor(&C_DFT_value1.x,1); C_DFT_value1.y=parity*C_DFT_value1.y + shfl_xor(&C_DFT_value1.y,1); D_DFT_value1.x=parity*D_DFT_value1.x + shfl_xor(&D_DFT_value1.x,1); D_DFT_value1.y=parity*D_DFT_value1.y + shfl_xor(&D_DFT_value1.y,1); A_DFT_value2.x=parity*A_DFT_value2.x + shfl_xor(&A_DFT_value2.x,1); A_DFT_value2.y=parity*A_DFT_value2.y + shfl_xor(&A_DFT_value2.y,1); B_DFT_value2.x=parity*B_DFT_value2.x + shfl_xor(&B_DFT_value2.x,1); B_DFT_value2.y=parity*B_DFT_value2.y + shfl_xor(&B_DFT_value2.y,1); C_DFT_value2.x=parity*C_DFT_value2.x + shfl_xor(&C_DFT_value2.x,1); C_DFT_value2.y=parity*C_DFT_value2.y + shfl_xor(&C_DFT_value2.y,1); D_DFT_value2.x=parity*D_DFT_value2.x + shfl_xor(&D_DFT_value2.x,1); D_DFT_value2.y=parity*D_DFT_value2.y + shfl_xor(&D_DFT_value2.y,1); //--> Second through Fifth iteration (no synchronization) PoT=2; PoTp1=4; for(q=1;q<5;q++){ m_param = (local_id & (PoTp1 - 1)); itemp = m_param>>q; parity=((itemp<<1)-1); W = Get_W_value_inverse(PoTp1, itemp*m_param); Aftemp1.x = W.x*A_DFT_value1.x - W.y*A_DFT_value1.y; Aftemp1.y = W.x*A_DFT_value1.y + W.y*A_DFT_value1.x; Bftemp1.x = W.x*B_DFT_value1.x - W.y*B_DFT_value1.y; Bftemp1.y = W.x*B_DFT_value1.y + W.y*B_DFT_value1.x; Cftemp1.x = W.x*C_DFT_value1.x - W.y*C_DFT_value1.y; Cftemp1.y = W.x*C_DFT_value1.y + W.y*C_DFT_value1.x; Dftemp1.x = W.x*D_DFT_value1.x - W.y*D_DFT_value1.y; Dftemp1.y = W.x*D_DFT_value1.y + W.y*D_DFT_value1.x; Aftemp2.x = W.x*A_DFT_value2.x - W.y*A_DFT_value2.y; Aftemp2.y = W.x*A_DFT_value2.y + W.y*A_DFT_value2.x; Bftemp2.x = W.x*B_DFT_value2.x - W.y*B_DFT_value2.y; Bftemp2.y = W.x*B_DFT_value2.y + W.y*B_DFT_value2.x; Cftemp2.x = W.x*C_DFT_value2.x - W.y*C_DFT_value2.y; Cftemp2.y = W.x*C_DFT_value2.y + W.y*C_DFT_value2.x; Dftemp2.x = W.x*D_DFT_value2.x - W.y*D_DFT_value2.y; Dftemp2.y = W.x*D_DFT_value2.y + W.y*D_DFT_value2.x; A_DFT_value1.x = Aftemp1.x + parity*shfl_xor(&Aftemp1.x,PoT); A_DFT_value1.y = Aftemp1.y + parity*shfl_xor(&Aftemp1.y,PoT); B_DFT_value1.x = Bftemp1.x + parity*shfl_xor(&Bftemp1.x,PoT); B_DFT_value1.y = Bftemp1.y + parity*shfl_xor(&Bftemp1.y,PoT); C_DFT_value1.x = Cftemp1.x + parity*shfl_xor(&Cftemp1.x,PoT); C_DFT_value1.y = Cftemp1.y + parity*shfl_xor(&Cftemp1.y,PoT); D_DFT_value1.x = Dftemp1.x + parity*shfl_xor(&Dftemp1.x,PoT); D_DFT_value1.y = Dftemp1.y + parity*shfl_xor(&Dftemp1.y,PoT); A_DFT_value2.x = Aftemp2.x + parity*shfl_xor(&Aftemp2.x,PoT); A_DFT_value2.y = Aftemp2.y + parity*shfl_xor(&Aftemp2.y,PoT); B_DFT_value2.x = Bftemp2.x + parity*shfl_xor(&Bftemp2.x,PoT); B_DFT_value2.y = Bftemp2.y + parity*shfl_xor(&Bftemp2.y,PoT); C_DFT_value2.x = Cftemp2.x + parity*shfl_xor(&Cftemp2.x,PoT); C_DFT_value2.y = Cftemp2.y + parity*shfl_xor(&Cftemp2.y,PoT); D_DFT_value2.x = Dftemp2.x + parity*shfl_xor(&Dftemp2.x,PoT); D_DFT_value2.y = Dftemp2.y + parity*shfl_xor(&Dftemp2.y,PoT); PoT=PoT<<1; PoTp1=PoTp1<<1; } itemp = local_id + (warp_id<<2)*WARP; s_input1[itemp] = A_DFT_value1; s_input1[itemp + WARP] = B_DFT_value1; 
s_input1[itemp + 2*WARP] = C_DFT_value1; s_input1[itemp + 3*WARP] = D_DFT_value1; s_input2[itemp] = A_DFT_value2; s_input2[itemp + WARP] = B_DFT_value2; s_input2[itemp + 2*WARP] = C_DFT_value2; s_input2[itemp + 3*WARP] = D_DFT_value2; for(q=5;q<(const_params::fft_exp-1);q++){ __syncthreads(); m_param = threadIdx.x & (PoT - 1); j=threadIdx.x>>q; W=Get_W_value_inverse(PoTp1,m_param); A_read_index=j*(PoTp1<<1) + m_param; B_read_index=j*(PoTp1<<1) + m_param + PoT; C_read_index=j*(PoTp1<<1) + m_param + PoTp1; D_read_index=j*(PoTp1<<1) + m_param + 3*PoT; Aftemp1 = s_input1[A_read_index]; Bftemp1 = s_input1[B_read_index]; A_DFT_value1.x=Aftemp1.x + W.x*Bftemp1.x - W.y*Bftemp1.y; A_DFT_value1.y=Aftemp1.y + W.x*Bftemp1.y + W.y*Bftemp1.x; B_DFT_value1.x=Aftemp1.x - W.x*Bftemp1.x + W.y*Bftemp1.y; B_DFT_value1.y=Aftemp1.y - W.x*Bftemp1.y - W.y*Bftemp1.x; Aftemp2 = s_input2[A_read_index]; Bftemp2 = s_input2[B_read_index]; A_DFT_value2.x=Aftemp2.x + W.x*Bftemp2.x - W.y*Bftemp2.y; A_DFT_value2.y=Aftemp2.y + W.x*Bftemp2.y + W.y*Bftemp2.x; B_DFT_value2.x=Aftemp2.x - W.x*Bftemp2.x + W.y*Bftemp2.y; B_DFT_value2.y=Aftemp2.y - W.x*Bftemp2.y - W.y*Bftemp2.x; Cftemp1 = s_input1[C_read_index]; Dftemp1 = s_input1[D_read_index]; C_DFT_value1.x=Cftemp1.x + W.x*Dftemp1.x - W.y*Dftemp1.y; C_DFT_value1.y=Cftemp1.y + W.x*Dftemp1.y + W.y*Dftemp1.x; D_DFT_value1.x=Cftemp1.x - W.x*Dftemp1.x + W.y*Dftemp1.y; D_DFT_value1.y=Cftemp1.y - W.x*Dftemp1.y - W.y*Dftemp1.x; Cftemp2 = s_input2[C_read_index]; Dftemp2 = s_input2[D_read_index]; C_DFT_value2.x=Cftemp2.x + W.x*Dftemp2.x - W.y*Dftemp2.y; C_DFT_value2.y=Cftemp2.y + W.x*Dftemp2.y + W.y*Dftemp2.x; D_DFT_value2.x=Cftemp2.x - W.x*Dftemp2.x + W.y*Dftemp2.y; D_DFT_value2.y=Cftemp2.y - W.x*Dftemp2.y - W.y*Dftemp2.x; s_input1[A_read_index]=A_DFT_value1; s_input1[B_read_index]=B_DFT_value1; s_input1[C_read_index]=C_DFT_value1; s_input1[D_read_index]=D_DFT_value1; s_input2[A_read_index]=A_DFT_value2; s_input2[B_read_index]=B_DFT_value2; s_input2[C_read_index]=C_DFT_value2; s_input2[D_read_index]=D_DFT_value2; PoT=PoT<<1; PoTp1=PoTp1<<1; } //last iteration __syncthreads(); m_param = threadIdx.x; W=Get_W_value_inverse(PoTp1,m_param); A_read_index = m_param; B_read_index = m_param + PoT; C_read_index = m_param + (PoT>>1); D_read_index = m_param + 3*(PoT>>1); Aftemp1 = s_input1[A_read_index]; Bftemp1 = s_input1[B_read_index]; A_DFT_value1.x=Aftemp1.x + W.x*Bftemp1.x - W.y*Bftemp1.y; A_DFT_value1.y=Aftemp1.y + W.x*Bftemp1.y + W.y*Bftemp1.x; B_DFT_value1.x=Aftemp1.x - W.x*Bftemp1.x + W.y*Bftemp1.y; B_DFT_value1.y=Aftemp1.y - W.x*Bftemp1.y - W.y*Bftemp1.x; Aftemp2 = s_input2[A_read_index]; Bftemp2 = s_input2[B_read_index]; A_DFT_value2.x=Aftemp2.x + W.x*Bftemp2.x - W.y*Bftemp2.y; A_DFT_value2.y=Aftemp2.y + W.x*Bftemp2.y + W.y*Bftemp2.x; B_DFT_value2.x=Aftemp2.x - W.x*Bftemp2.x + W.y*Bftemp2.y; B_DFT_value2.y=Aftemp2.y - W.x*Bftemp2.y - W.y*Bftemp2.x; Cftemp1 = s_input1[C_read_index]; Dftemp1 = s_input1[D_read_index]; C_DFT_value1.x=Cftemp1.x - W.y*Dftemp1.x - W.x*Dftemp1.y; C_DFT_value1.y=Cftemp1.y - W.y*Dftemp1.y + W.x*Dftemp1.x; D_DFT_value1.x=Cftemp1.x + W.y*Dftemp1.x + W.x*Dftemp1.y; D_DFT_value1.y=Cftemp1.y + W.y*Dftemp1.y - W.x*Dftemp1.x; Cftemp2 = s_input2[C_read_index]; Dftemp2 = s_input2[D_read_index]; C_DFT_value2.x=Cftemp2.x - W.y*Dftemp2.x - W.x*Dftemp2.y; C_DFT_value2.y=Cftemp2.y - W.y*Dftemp2.y + W.x*Dftemp2.x; D_DFT_value2.x=Cftemp2.x + W.y*Dftemp2.x + W.x*Dftemp2.y; D_DFT_value2.y=Cftemp2.y + W.y*Dftemp2.y - W.x*Dftemp2.x; s_input1[A_read_index]=A_DFT_value1; 
	s_input1[B_read_index]=B_DFT_value1;
	s_input1[C_read_index]=C_DFT_value1;
	s_input1[D_read_index]=D_DFT_value1;
	s_input2[A_read_index]=A_DFT_value2;
	s_input2[B_read_index]=B_DFT_value2;
	s_input2[C_read_index]=C_DFT_value2;
	s_input2[D_read_index]=D_DFT_value2;
	__syncthreads();
}

// Forward FFT of one fft_length segment per thread block; each thread owns four elements.
template<class const_params>
__global__ void k_customFFT_GPU_forward(float2 *d_input, float2* d_output) {
	extern __shared__ float2 s_input[];
	s_input[threadIdx.x] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length];
	s_input[threadIdx.x + const_params::fft_length_quarter] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_quarter];
	s_input[threadIdx.x + const_params::fft_length_half] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_half];
	s_input[threadIdx.x + const_params::fft_length_three_quarters] = d_input[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_three_quarters];
	__syncthreads();
	CT_DIF_FFT_4way<const_params>(s_input);
	__syncthreads();
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length] = s_input[threadIdx.x];
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_quarter] = s_input[threadIdx.x + const_params::fft_length_quarter];
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_half] = s_input[threadIdx.x + const_params::fft_length_half];
	d_output[threadIdx.x + blockIdx.x*const_params::fft_length + const_params::fft_length_three_quarters] = s_input[threadIdx.x + const_params::fft_length_three_quarters];
}

// Loads one overlapped segment into shared memory, zero-padding samples that fall
// outside the input signal.
template<class const_params>
__device__ __inline__ void prepare_signal_4elem(float2* s_signal, float2 const* __restrict__ d_input_signal, int signal_length, int useful_part_size, int offset) {
	// Position of this thread's first sample in the input; the segment starts
	// 'offset' samples before its useful part so the aliased region can be discarded.
	int pos = blockIdx.x*useful_part_size + threadIdx.x - offset;
	
	s_signal[threadIdx.x].x = 0; s_signal[threadIdx.x].y = 0;
	s_signal[threadIdx.x + const_params::fft_length_quarter].x = 0; s_signal[threadIdx.x + const_params::fft_length_quarter].y = 0;
	s_signal[threadIdx.x + const_params::fft_length_half].x = 0; s_signal[threadIdx.x + const_params::fft_length_half].y = 0;
	s_signal[threadIdx.x + const_params::fft_length_three_quarters].x = 0; s_signal[threadIdx.x + const_params::fft_length_three_quarters].y = 0;
	
	if( pos>=0 && pos<signal_length )
		s_signal[threadIdx.x] = d_input_signal[pos];
	if( (pos + const_params::fft_length_quarter)>=0 && (pos + const_params::fft_length_quarter)<signal_length )
		s_signal[threadIdx.x + const_params::fft_length_quarter] = d_input_signal[pos + const_params::fft_length_quarter];
	if( (pos + const_params::fft_length_half)>=0 && (pos + const_params::fft_length_half)<signal_length )
		s_signal[threadIdx.x + const_params::fft_length_half] = d_input_signal[pos + const_params::fft_length_half];
	if( (pos + const_params::fft_length_three_quarters)>=0 && (pos + const_params::fft_length_three_quarters)<signal_length )
		s_signal[threadIdx.x + const_params::fft_length_three_quarters] = d_input_signal[pos + const_params::fft_length_three_quarters];
}

template<class const_params>
__global__ void k_GPU_conv_OLS_via_customFFT( float2 const* __restrict__ d_input_signal, float2 *d_output_plane, float2 const* __restrict__ d_filters, int signal_length, int useful_part_size, int offset, int nConvolutions, int nFilters) {
	extern __shared__ float2 s_input_1[];
	float2 r_filter_1[4];
	float2 signal[4];
	int pos, t;
	
	// Loading signal segment
	prepare_signal_4elem<const_params>(s_input_1, d_input_signal,
signal_length, useful_part_size, offset); offset = ((const_params::fft_length - useful_part_size + 1)>>1); // Forward FFT on input signal CT_DIF_FFT_4way<const_params>(s_input_1); // Storing FFTed signal for reuse signal[0]=s_input_1[threadIdx.x]; signal[1]=s_input_1[threadIdx.x + const_params::fft_length_quarter]; signal[2]=s_input_1[threadIdx.x + const_params::fft_length_half]; signal[3]=s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; for(t=0; t<nFilters; t++){ // Loading filters pos = t*const_params::fft_length + threadIdx.x; r_filter_1[0]=__ldg(&d_filters[pos]); r_filter_1[1]=__ldg(&d_filters[pos + const_params::fft_length_quarter]); r_filter_1[2]=__ldg(&d_filters[pos + const_params::fft_length_half]); r_filter_1[3]=__ldg(&d_filters[pos + const_params::fft_length_three_quarters]); // Convolution (complex multiplication) s_input_1[threadIdx.x].x = (r_filter_1[0].x*signal[0].x - r_filter_1[0].y*signal[0].y)/((float) const_params::fft_length); s_input_1[threadIdx.x].y = (r_filter_1[0].x*signal[0].y + r_filter_1[0].y*signal[0].x)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_quarter].x = (r_filter_1[1].x*signal[1].x - r_filter_1[1].y*signal[1].y)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_quarter].y = (r_filter_1[1].x*signal[1].y + r_filter_1[1].y*signal[1].x)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_half].x = (r_filter_1[2].x*signal[2].x - r_filter_1[2].y*signal[2].y)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_half].y = (r_filter_1[2].x*signal[2].y + r_filter_1[2].y*signal[2].x)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_three_quarters].x = (r_filter_1[3].x*signal[3].x - r_filter_1[3].y*signal[3].y)/((float) const_params::fft_length); s_input_1[threadIdx.x + const_params::fft_length_three_quarters].y = (r_filter_1[3].x*signal[3].y + r_filter_1[3].y*signal[3].x)/((float) const_params::fft_length); __syncthreads(); //----------> Inverse FFT CT_DIT_FFT_4way<const_params>(s_input_1); //----------< // Writing out the clean part of the segment pos = t*useful_part_size*nConvolutions + blockIdx.x*useful_part_size + threadIdx.x; if( threadIdx.x>=offset && threadIdx.x<(useful_part_size+offset) ) { d_output_plane[pos - offset] = s_input_1[threadIdx.x]; } if( (threadIdx.x + const_params::fft_length_quarter)>=offset && (threadIdx.x + const_params::fft_length_quarter)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_quarter - offset] = s_input_1[threadIdx.x + const_params::fft_length_quarter]; } if( (threadIdx.x + const_params::fft_length_half)>=offset && (threadIdx.x + const_params::fft_length_half)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_half - offset] = s_input_1[threadIdx.x + const_params::fft_length_half]; } if( (threadIdx.x + const_params::fft_length_three_quarters)>=offset && (threadIdx.x + const_params::fft_length_three_quarters)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_three_quarters - offset] = s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; } __syncthreads(); } } template<class const_params> __global__ void k_GPU_conv_OLS_via_customFFT_2filters( float2 const* __restrict__ d_input_signal, float2 *d_output_plane, float2 const* __restrict__ d_filters, int signal_length, int useful_part_size, int offset, int nConvolutions, int nFilters) { __shared__ float2 
s_input_1[const_params::fft_length]; __shared__ float2 s_input_2[const_params::fft_length]; float2 r_filter_1[4]; float2 r_filter_2[4]; float2 signal[4]; int pos, t; // Loading data prepare_signal_4elem<const_params>(s_input_1, d_input_signal, signal_length, useful_part_size, offset); // Forward FFT on input signal CT_DIF_FFT_4way<const_params>(s_input_1); // Storing FFTed signal for reuse signal[0]=s_input_1[threadIdx.x]; signal[1]=s_input_1[threadIdx.x + const_params::fft_length_quarter]; signal[2]=s_input_1[threadIdx.x + const_params::fft_length_half]; signal[3]=s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; for(t=0; t<(nFilters>>1); t++){ // Loading filters pos = 2*t*const_params::fft_length + threadIdx.x; r_filter_1[0]=__ldg(&d_filters[pos]); r_filter_1[1]=__ldg(&d_filters[pos + const_params::fft_length_quarter]); r_filter_1[2]=__ldg(&d_filters[pos + const_params::fft_length_half]); r_filter_1[3]=__ldg(&d_filters[pos + const_params::fft_length_three_quarters]); r_filter_2[0]=__ldg(&d_filters[pos + const_params::fft_length]); r_filter_2[1]=__ldg(&d_filters[pos + const_params::fft_length + const_params::fft_length_quarter]); r_filter_2[2]=__ldg(&d_filters[pos + const_params::fft_length + const_params::fft_length_half]); r_filter_2[3]=__ldg(&d_filters[pos + const_params::fft_length + const_params::fft_length_three_quarters]); // Convolution (complex multiplication) s_input_1[threadIdx.x].x = r_filter_1[0].x*signal[0].x - r_filter_1[0].y*signal[0].y; s_input_1[threadIdx.x].y = r_filter_1[0].x*signal[0].y + r_filter_1[0].y*signal[0].x; s_input_1[threadIdx.x + const_params::fft_length_quarter].x = r_filter_1[1].x*signal[1].x - r_filter_1[1].y*signal[1].y; s_input_1[threadIdx.x + const_params::fft_length_quarter].y = r_filter_1[1].x*signal[1].y + r_filter_1[1].y*signal[1].x; s_input_1[threadIdx.x + const_params::fft_length_half].x = r_filter_1[2].x*signal[2].x - r_filter_1[2].y*signal[2].y; s_input_1[threadIdx.x + const_params::fft_length_half].y = r_filter_1[2].x*signal[2].y + r_filter_1[2].y*signal[2].x; s_input_1[threadIdx.x + const_params::fft_length_three_quarters].x = r_filter_1[3].x*signal[3].x - r_filter_1[3].y*signal[3].y; s_input_1[threadIdx.x + const_params::fft_length_three_quarters].y = r_filter_1[3].x*signal[3].y + r_filter_1[3].y*signal[3].x; s_input_2[threadIdx.x].x = r_filter_2[0].x*signal[0].x - r_filter_2[0].y*signal[0].y; s_input_2[threadIdx.x].y = r_filter_2[0].x*signal[0].y + r_filter_2[0].y*signal[0].x; s_input_2[threadIdx.x + const_params::fft_length_quarter].x = r_filter_2[1].x*signal[1].x - r_filter_2[1].y*signal[1].y; s_input_2[threadIdx.x + const_params::fft_length_quarter].y = r_filter_2[1].x*signal[1].y + r_filter_2[1].y*signal[1].x; s_input_2[threadIdx.x + const_params::fft_length_half].x = r_filter_2[2].x*signal[2].x - r_filter_2[2].y*signal[2].y; s_input_2[threadIdx.x + const_params::fft_length_half].y = r_filter_2[2].x*signal[2].y + r_filter_2[2].y*signal[2].x; s_input_2[threadIdx.x + const_params::fft_length_three_quarters].x = r_filter_2[3].x*signal[3].x - r_filter_2[3].y*signal[3].y; s_input_2[threadIdx.x + const_params::fft_length_three_quarters].y = r_filter_2[3].x*signal[3].y + r_filter_2[3].y*signal[3].x; __syncthreads(); //----------> Inverse FFT FFT_CT_DIT_4elem_2vertical_no_reorder<const_params>(s_input_1, s_input_2); //----------< // Writing out the clean part of the segment // First convolution pos = 2*t*useful_part_size*nConvolutions + blockIdx.x*useful_part_size + threadIdx.x; if( threadIdx.x>=offset && 
threadIdx.x<(useful_part_size+offset) ) { d_output_plane[pos - offset] = s_input_1[threadIdx.x]; } if( (threadIdx.x + const_params::fft_length_quarter)>=offset && (threadIdx.x + const_params::fft_length_quarter)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_quarter - offset] = s_input_1[threadIdx.x + const_params::fft_length_quarter]; } if( (threadIdx.x + const_params::fft_length_half)>=offset && (threadIdx.x + const_params::fft_length_half)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_half - offset] = s_input_1[threadIdx.x + const_params::fft_length_half]; } if( (threadIdx.x + const_params::fft_length_three_quarters)>=offset && (threadIdx.x + const_params::fft_length_three_quarters)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_three_quarters - offset] = s_input_1[threadIdx.x + const_params::fft_length_three_quarters]; } // Second convolution pos = pos + useful_part_size*nConvolutions; if( threadIdx.x>=offset && threadIdx.x<(useful_part_size+offset) ) { d_output_plane[pos - offset] = s_input_2[threadIdx.x]; } if( (threadIdx.x + const_params::fft_length_quarter)>=offset && (threadIdx.x + const_params::fft_length_quarter)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_quarter - offset] = s_input_2[threadIdx.x + const_params::fft_length_quarter]; } if( (threadIdx.x + const_params::fft_length_half)>=offset && (threadIdx.x + const_params::fft_length_half)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_half - offset] = s_input_2[threadIdx.x + const_params::fft_length_half]; } if( (threadIdx.x + const_params::fft_length_three_quarters)>=offset && (threadIdx.x + const_params::fft_length_three_quarters)<(useful_part_size+offset) ) { d_output_plane[pos + const_params::fft_length_three_quarters - offset] = s_input_2[threadIdx.x + const_params::fft_length_three_quarters]; } __syncthreads(); } } //***************************************************************************** //***************************************************************************** //***************************************************************************** void CONV_init(){ //---------> Specific nVidia stuff cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); } void forwardCustomFFT(float2 *d_filters, int FFT_size, int nFilters){ dim3 gridSize(nFilters, 1, 1); dim3 blockSize(FFT_size/4, 1, 1); switch(FFT_size) { case 256: k_customFFT_GPU_forward<FFT_256><<<gridSize, blockSize, FFT_size*8>>>(d_filters, d_filters); break; case 512: k_customFFT_GPU_forward<FFT_512><<<gridSize, blockSize, FFT_size*8>>>(d_filters, d_filters); break; case 1024: k_customFFT_GPU_forward<FFT_1024><<<gridSize, blockSize, FFT_size*8>>>(d_filters, d_filters); break; case 2048: k_customFFT_GPU_forward<FFT_2048><<<gridSize, blockSize, FFT_size*8>>>(d_filters, d_filters); break; case 4096: k_customFFT_GPU_forward<FFT_4096><<<gridSize, blockSize, FFT_size*8>>>(d_filters, d_filters); break; default : break; } } void conv_OLS_customFFT(float2 *d_input_signal, float2 *d_output_plane, float2 *d_filters, int signal_length, int convolution_length, int useful_part_size, int offset, int nConvolutions, int nFilters){ dim3 gridSize(nConvolutions, 1, 1); dim3 blockSize(convolution_length/4, 1, 1); switch(convolution_length) { case 256: k_GPU_conv_OLS_via_customFFT<FFT_256><<<gridSize, blockSize, convolution_length*8>>>(d_input_signal, d_output_plane, d_filters, 
signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 512: k_GPU_conv_OLS_via_customFFT<FFT_512><<<gridSize, blockSize, convolution_length*8>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 1024: k_GPU_conv_OLS_via_customFFT<FFT_1024><<<gridSize, blockSize, convolution_length*8>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 2048: k_GPU_conv_OLS_via_customFFT<FFT_2048><<<gridSize, blockSize, convolution_length*8>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 4096: k_GPU_conv_OLS_via_customFFT<FFT_4096><<<gridSize, blockSize, convolution_length*8>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; default : break; } } void conv_OLS_customFFT_2filters(float2 *d_input_signal, float2 *d_output_plane, float2 *d_filters, int signal_length, int convolution_length, int useful_part_size, int offset, int nConvolutions, int nFilters){ dim3 gridSize(nConvolutions, 1, 1); dim3 blockSize(convolution_length/4, 1, 1); switch(convolution_length) { case 256: k_GPU_conv_OLS_via_customFFT_2filters<FFT_256><<<gridSize, blockSize>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 512: k_GPU_conv_OLS_via_customFFT_2filters<FFT_512><<<gridSize, blockSize>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 1024: k_GPU_conv_OLS_via_customFFT_2filters<FFT_1024><<<gridSize, blockSize>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; case 2048: k_GPU_conv_OLS_via_customFFT_2filters<FFT_2048><<<gridSize, blockSize>>>(d_input_signal, d_output_plane, d_filters, signal_length, useful_part_size, offset, nConvolutions, nFilters); break; default : break; } } void convolution_via_customFFT_benchmark(float2 *d_input_signal, float2 *d_output_plane, float2 *d_filters, int signal_length, int convolution_length, int useful_part_size, int offset, int nConvolutions, int nFilters, double *CONV_time, int kernel_type){ GpuTimer timer; // --------> Preparing filters for convolution forwardCustomFFT(d_filters, convolution_length, nFilters); // -----------------------------------------------> // --------> Measured part (Convolution) timer.Start(); CONV_init(); if(kernel_type==1){ conv_OLS_customFFT(d_input_signal, d_output_plane, d_filters, signal_length, convolution_length, useful_part_size, offset, nConvolutions, nFilters); } if(kernel_type==2){ conv_OLS_customFFT_2filters(d_input_signal, d_output_plane, d_filters, signal_length, convolution_length, useful_part_size, offset, nConvolutions, nFilters); } timer.Stop(); *CONV_time += timer.Elapsed(); // --------> Measured part (Convolution) // -----------------------------------------------> } //***************************************************************************** //***************************************************************************** //***************************************************************************** int GPU_convolution_OLS_customFFT(float2 *h_input_signal, float2 *h_output_plane, float2 *h_filters, int signal_length, int convolution_length, int filter_length, int past_filter_samples, int nFilters, int nRuns, int kernel_type, 
double *execution_time){ //---------> Initial nVidia stuff int devCount; size_t free_mem, total_mem; checkCudaErrors(cudaGetDeviceCount(&devCount)); if(device<devCount){ checkCudaErrors(cudaSetDevice(device)); } else { printf("ERROR! Selected device is not available\n"); return(1); } cudaMemGetInfo(&free_mem,&total_mem); //---------> Time measurements double transfer_in, transfer_out, CONV_kFFT_time, total_CONV_kFFT_time; transfer_in=0.0; transfer_out=0.0; CONV_kFFT_time=0.0; total_CONV_kFFT_time=0; GpuTimer timer; //----> Calculating variables for overlap-and-save int offset = past_filter_samples; int useful_part_size = convolution_length - filter_length + 1; int nConvolutions = (signal_length + useful_part_size - 1)/useful_part_size; if(DEBUG) printf("signal_length: %d; filter_length: %d; segment_size: %d;\n", signal_length, filter_length, convolution_length); if(DEBUG) printf("offset: %d; nConvolutions: %d; useful_part_size: %d;\n", offset, nConvolutions, useful_part_size); //---------> Defining variables and their sizes float2 *d_output_plane; float2 *d_input_signal; float2 *d_filters; size_t input_size = signal_length; size_t output_size = nConvolutions*useful_part_size*nFilters; size_t template_size = convolution_length*nFilters; //---------> Checking memory float free_memory = (float) free_mem/(1024.0*1024.0); float memory_required=(( ((float) input_size) + ((float) output_size) + ((float) template_size))*sizeof(float2))/(1024.0*1024.0); if(DEBUG) printf("\n"); if(DEBUG) printf("DEBUG:\n"); if(DEBUG) printf(" Device has %0.3f MB of total memory, which %0.3f MB is available. Memory required %0.3f MB\n", (float) total_mem/(1024.0*1024.0), free_memory ,memory_required); if(DEBUG) printf(" d_input_signal: %0.3f MB\n", ((float) input_size*sizeof(float2))/(1024.0*1024.0) ); if(DEBUG) printf(" d_filters: %0.3f MB\n", ((float) template_size*sizeof(float2))/(1024.0*1024.0) ); if(DEBUG) printf(" d_output_plane: %0.3f MB\n", ((float) output_size*sizeof(float2))/(1024.0*1024.0) ); if(memory_required>free_memory) {printf("\n \n Array is too big for the device! 
\n \n"); return(-3);} //---------> Memory allocation if (VERBOSE) printf("Device memory allocation...: \t\t"); timer.Start(); checkCudaErrors(cudaMalloc((void **) &d_input_signal, sizeof(float2)*input_size)); checkCudaErrors(cudaMalloc((void **) &d_output_plane, sizeof(float2)*output_size)); checkCudaErrors(cudaMalloc((void **) &d_filters, sizeof(float2)*template_size)); timer.Stop(); if (VERBOSE) printf("done in %g ms.\n", timer.Elapsed()); //------------------------------------------------------------------------------ //---------> CONV calculation //-----> Copy chunk of input data to a device if (VERBOSE) printf("Transferring data into device memory...: \t\t"); timer.Start(); checkCudaErrors(cudaMemcpy(d_input_signal, h_input_signal, input_size*sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_filters, h_filters, template_size*sizeof(float2), cudaMemcpyHostToDevice)); timer.Stop(); transfer_in+=timer.Elapsed(); if (VERBOSE) printf("done in %g ms.\n", timer.Elapsed()); if (DEBUG) printf("Calculating convolution via kFFT...: \t\t"); total_CONV_kFFT_time = 0; for(int f=0; f<nRuns; f++){ checkCudaErrors(cudaMemcpy(d_input_signal, h_input_signal, input_size*sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_filters, h_filters, template_size*sizeof(float2), cudaMemcpyHostToDevice)); convolution_via_customFFT_benchmark(d_input_signal, d_output_plane, d_filters, signal_length, convolution_length, useful_part_size, offset, nConvolutions, nFilters, &total_CONV_kFFT_time, kernel_type); checkCudaErrors(cudaGetLastError()); } CONV_kFFT_time=total_CONV_kFFT_time/nRuns; if (DEBUG) printf("done in %g ms.\n", CONV_kFFT_time); *execution_time=CONV_kFFT_time; //-----> Copy chunk of output data to host if (DEBUG) printf("Transferring data to host...: \t\t"); timer.Start(); checkCudaErrors(cudaMemcpy( h_output_plane, d_output_plane, output_size*sizeof(float2), cudaMemcpyDeviceToHost)); timer.Stop(); transfer_out+=timer.Elapsed(); if (DEBUG) printf("done in %g ms.\n", timer.Elapsed()); //---------> error check ----- checkCudaErrors(cudaGetLastError()); //---------> Feeing allocated resources checkCudaErrors(cudaFree(d_input_signal)); checkCudaErrors(cudaFree(d_output_plane)); checkCudaErrors(cudaFree(d_filters)); return(0); }
e3992b293a53f38687b06a2cc57b48739ec79b87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernels_hip.cuh" ////////////// /// SLOPE //// ////////////// // Derives the slope at a given point through comparison with neighbours. __global__ void slopeKernel(float *d_DEM, const int rows, const int cols, float *d_deg) { // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; float centre = d_DEM[absIdx]; float dist = 0.0f, diff = 0.0f; // Variable to hold index of next cell to compare with. int u, v, absUV; //int count = 0; float temp; for (int i = -1; i <= 1; i++) { u = x + i; if (u >= 0 && u < cols) { for (int j = -1; j <= 1; j++) { v = y + j; if (v >= 0 && v < rows) { absUV = u + v * rows; // Iterate the running tally of valid comparisons. //count++; // Calculate distance between the two points. temp = fabsf(centre - d_DEM[absUV]); // if new maximum difference in height, then set it as the new diff and calculate dist using trig. if (temp > diff) { diff = temp; dist = sqrtf(float(abs(i) + abs(j))); } } } } } // Radians float slopeDeg = atan(diff / dist); d_deg[absIdx] = slopeDeg; } ////////////// // ROUGHNESS / ////////////// // Using Root Mean Square as a measure for local roughness. __global__ void roughnessKernel(float *d_DEM, const int rows, const int cols, float *d_rough) { // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; // Height of centre cell. float centre = d_DEM[absIdx]; int u, v, absUV; // x,y,1-D index for neighbouring cells. int count = 0; float avg = 0.0f; for (int i = -1; i <= 1; i++) { u = x + i; if (u >= 0 && u < cols) { for (int j = -1; j <= 1; j++) { v = y + j; if (v >= 0 && v < rows && !(i == 0 && j == 0)) // Discount the centre cell. { absUV = u + v * rows; count++; // Calculate distance between the two points. avg += powf(d_DEM[absUV] - centre, 2); } } } } if (count != 0) { avg = sqrtf(avg / float(count)); d_rough[absIdx] = avg; } } ////////////// /// HAZARD /// ////////////// // Derives hazard score. // Shadow is factored in within this kernel, trivial. __global__ void hazardKernel(float *d_slope, float *d_rough, unsigned char *d_source, const int rows, const int cols, int *d_score, float* slopeMax, float* roughMax, float slopeWeight, float roughWeight, float shadowWeight) { const int hazardMax = 1000; // Arbitary maximum hazard score. // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; // Roughness & Slope float r = d_rough[absIdx]; float s = d_slope[absIdx]; //unsigned int sh = unsigned int(255 - d_source[absIdx]); // Testing int sh = 0; if (d_source[absIdx] < 15) sh = 1; // End Testing float normalScore = ((slopeWeight * s / slopeMax[0]) + (roughWeight * r / roughMax[0]) + (shadowWeight * sh)) / (slopeWeight + roughWeight + shadowWeight); // Scale the normalized score and round the resulting float. normalScore = rintf(normalScore * hazardMax); // Convert the float to an integer and store in array. 
d_score[absIdx] = float2int(normalScore); } /////////////// // GRASSFIRE // /////////////// //////////////// Raster ///////////////// __global__ void rowRasterKernel(int *d_score, const int rows, const int cols) { // Fix y-coordinate. int y = threadIdx.x + blockIdx.x * blockDim.x; if (y < rows - 1) { // max(current, left cell - 1) for (int x = 1; x < cols; x++) d_score[x + y * cols] = max(d_score[x + y * cols], d_score[x - 1 + y * cols] - 1); } } __global__ void colRasterKernel(int *d_score, const int rows, const int cols) { // Fix x-coordinate. int x = threadIdx.x + blockIdx.x * blockDim.x; if (x < cols - 1) { // max(current, cell above - 1) for (int y = 1; y < rows; y++) d_score[x + y*cols] = max(d_score[x + y*cols], d_score[x + (y - 1) * cols] - 1); } } ///////////// Anti - Raster ///////////// __global__ void rowAntiRasterKernel(int *d_score, const int rows, const int cols) { // Fix y-coordinate. int y = threadIdx.x + blockIdx.x * blockDim.x; if (y < rows - 1) { { // max(current, right cell - 1) for (int x = cols - 2; x >= 0; x--) d_score[x + y * cols] = max(d_score[x + y * cols], d_score[(x + 1) + y * cols] - 1); } } } __global__ void colAntiRasterKernel(int *d_score, const int rows, const int cols) { int x = threadIdx.x + blockIdx.x * blockDim.x; if (x < cols - 1) { // max(current, cell below - 1) for (int y = rows - 2; y >= 0; y--) d_score[x + y*cols] = max(d_score[x + y*cols], d_score[x + (y + 1) * cols] - 1); } } ///////////////// // Max kernels // ///////////////// __global__ void maxFirst(float *in_array, float *blockMax, const int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = stride; // Shared memory array for the block. Holds each threads maximum. __shared__ float threadMax[256]; // Set initial max to the first element for each thread. float temp = in_array[index]; // Each thread performs a comparison until all elements have been exhausted. while (index + offset < n) { // If new max store in temp variable. temp = fmaxf(temp, in_array[index + offset]); // Shift to the next element to compare. offset += stride; } // Set max for this thread in the shared array. threadMax[threadIdx.x] = temp; __syncthreads(); // Find the max of the block through reduction. // At each step half number of threads performing a comparison until only thread 0 remains, with the final result. unsigned int i = blockDim.x / 2; while (i != 0) { if (threadIdx.x < i) threadMax[threadIdx.x] = fmaxf(threadMax[threadIdx.x], threadMax[threadIdx.x + i]); __syncthreads(); i /= 2; } // threadMax[0] now contains the block maximum. __syncthreads(); // The first thread writes the final result to the output array of block maximums. if (threadIdx.x == 0) blockMax[blockIdx.x] = threadMax[0]; } // Final stage of max finding kernels. __global__ void maxSecond(float *blockMax, float* max) { // Perform a reduction on the input array of block maximums. unsigned int i = blockDim.x / 2; while (i != 0) { if (threadIdx.x < i) blockMax[threadIdx.x] = fmaxf(blockMax[threadIdx.x], blockMax[threadIdx.x + i]); __syncthreads(); i /= 2; } __syncthreads(); // Write the final result. if (threadIdx.x == 0) max[0] = blockMax[0]; } ////////////////// // Gauss Kernel // ////////////////// __global__ void gaussKernel(float* in_array, float* out_array, const int rows, const int cols) { // Hardcoded 5x5 Gaussian filter weights. 
float weights[25] { 0.003765, 0.015019, 0.023792, 0.015019, 0.003765, 0.015019, 0.059912, 0.094907, 0.059912, 0.015019, 0.023792, 0.094907, 0.150342, 0.094907, 0.023792, 0.015019, 0.059912, 0.094907, 0.059912, 0.015019, 0.003765, 0.015019, 0.023792, 0.015019, 0.003765 }; __shared__ float filter[25]; if (threadIdx.x == 0) { for (int i = 0; i < 25; i++) { filter[i] = weights[i]; } } __syncthreads(); // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; int filterWidth = 5; // Width of the Gaussian filter, hardcoded to 5. int half = filterWidth / 2; float blur = 0.f; // will contain the blurred value int width = cols - 1; int height = rows - 1; for (int i = -half; i <= half; ++i) // rows { for (int j = -half; j <= half; ++j) // columns { // Clamp filter to the image border int w = min(max(x + j, 0), width); int h = min(max(y + i, 0), height); // Blur is the product of the current pixel value and the weight of that pixel. // Remember that the sum of all weights equals 1, so this is a weighted average of the pixels. int idx = w + cols * h; // current pixel index float value = in_array[idx]; idx = (i + half) * filterWidth + j + half; float weight = filter[idx]; blur += value * weight; } } out_array[absIdx] = blur; }
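// Editor's note: a CPU reference (not in the original source) for one grassfire sweep,
// mirroring rowRasterKernel above: each hazard score decays by one per cell away from a
// local maximum; the other three kernels do the column and anti-raster sweeps the same way.
// Two hedged observations on the kernels above: the guards `y < rows - 1` / `x < cols - 1`
// appear to skip the last row/column (`y < rows` / `x < cols` look intended), and in
// slopeKernel/roughnessKernel the index `absUV = u + v * rows` is consistent with
// `absIdx = x + y*cols` only for square grids (`u + v * cols` appears intended).
#include <algorithm>
void rowRasterCPU(int *score, int rows, int cols) {
	for (int y = 0; y < rows; y++)
		for (int x = 1; x < cols; x++)  // left-to-right: max(current, left neighbour - 1)
			score[x + y*cols] = std::max(score[x + y*cols], score[x - 1 + y*cols] - 1);
}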
e3992b293a53f38687b06a2cc57b48739ec79b87.cu
#include "kernels.cuh" ////////////// /// SLOPE //// ////////////// // Derives the slope at a given point through comparison with neighbours. __global__ void slopeKernel(float *d_DEM, const int rows, const int cols, float *d_deg) { // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; float centre = d_DEM[absIdx]; float dist = 0.0f, diff = 0.0f; // Variable to hold index of next cell to compare with. int u, v, absUV; //int count = 0; float temp; for (int i = -1; i <= 1; i++) { u = x + i; if (u >= 0 && u < cols) { for (int j = -1; j <= 1; j++) { v = y + j; if (v >= 0 && v < rows) { absUV = u + v * rows; // Iterate the running tally of valid comparisons. //count++; // Calculate distance between the two points. temp = fabsf(centre - d_DEM[absUV]); // if new maximum difference in height, then set it as the new diff and calculate dist using trig. if (temp > diff) { diff = temp; dist = sqrtf(float(abs(i) + abs(j))); } } } } } // Radians float slopeDeg = atan(diff / dist); d_deg[absIdx] = slopeDeg; } ////////////// // ROUGHNESS / ////////////// // Using Root Mean Square as a measure for local roughness. __global__ void roughnessKernel(float *d_DEM, const int rows, const int cols, float *d_rough) { // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; // Height of centre cell. float centre = d_DEM[absIdx]; int u, v, absUV; // x,y,1-D index for neighbouring cells. int count = 0; float avg = 0.0f; for (int i = -1; i <= 1; i++) { u = x + i; if (u >= 0 && u < cols) { for (int j = -1; j <= 1; j++) { v = y + j; if (v >= 0 && v < rows && !(i == 0 && j == 0)) // Discount the centre cell. { absUV = u + v * rows; count++; // Calculate distance between the two points. avg += powf(d_DEM[absUV] - centre, 2); } } } } if (count != 0) { avg = sqrtf(avg / float(count)); d_rough[absIdx] = avg; } } ////////////// /// HAZARD /// ////////////// // Derives hazard score. // Shadow is factored in within this kernel, trivial. __global__ void hazardKernel(float *d_slope, float *d_rough, unsigned char *d_source, const int rows, const int cols, int *d_score, float* slopeMax, float* roughMax, float slopeWeight, float roughWeight, float shadowWeight) { const int hazardMax = 1000; // Arbitary maximum hazard score. // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; // Roughness & Slope float r = d_rough[absIdx]; float s = d_slope[absIdx]; //unsigned int sh = unsigned int(255 - d_source[absIdx]); // Testing int sh = 0; if (d_source[absIdx] < 15) sh = 1; // End Testing float normalScore = ((slopeWeight * s / slopeMax[0]) + (roughWeight * r / roughMax[0]) + (shadowWeight * sh)) / (slopeWeight + roughWeight + shadowWeight); // Scale the normalized score and round the resulting float. normalScore = rintf(normalScore * hazardMax); // Convert the float to an integer and store in array. 
d_score[absIdx] = float2int(normalScore); } /////////////// // GRASSFIRE // /////////////// //////////////// Raster ///////////////// __global__ void rowRasterKernel(int *d_score, const int rows, const int cols) { // Fix y-coordinate. int y = threadIdx.x + blockIdx.x * blockDim.x; if (y < rows - 1) { // max(current, left cell - 1) for (int x = 1; x < cols; x++) d_score[x + y * cols] = max(d_score[x + y * cols], d_score[x - 1 + y * cols] - 1); } } __global__ void colRasterKernel(int *d_score, const int rows, const int cols) { // Fix x-coordinate. int x = threadIdx.x + blockIdx.x * blockDim.x; if (x < cols - 1) { // max(current, cell above - 1) for (int y = 1; y < rows; y++) d_score[x + y*cols] = max(d_score[x + y*cols], d_score[x + (y - 1) * cols] - 1); } } ///////////// Anti - Raster ///////////// __global__ void rowAntiRasterKernel(int *d_score, const int rows, const int cols) { // Fix y-coordinate. int y = threadIdx.x + blockIdx.x * blockDim.x; if (y < rows - 1) { { // max(current, right cell - 1) for (int x = cols - 2; x >= 0; x--) d_score[x + y * cols] = max(d_score[x + y * cols], d_score[(x + 1) + y * cols] - 1); } } } __global__ void colAntiRasterKernel(int *d_score, const int rows, const int cols) { int x = threadIdx.x + blockIdx.x * blockDim.x; if (x < cols - 1) { // max(current, cell below - 1) for (int y = rows - 2; y >= 0; y--) d_score[x + y*cols] = max(d_score[x + y*cols], d_score[x + (y + 1) * cols] - 1); } } ///////////////// // Max kernels // ///////////////// __global__ void maxFirst(float *in_array, float *blockMax, const int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = stride; // Shared memory array for the block. Holds each threads maximum. __shared__ float threadMax[256]; // Set initial max to the first element for each thread. float temp = in_array[index]; // Each thread performs a comparison until all elements have been exhausted. while (index + offset < n) { // If new max store in temp variable. temp = fmaxf(temp, in_array[index + offset]); // Shift to the next element to compare. offset += stride; } // Set max for this thread in the shared array. threadMax[threadIdx.x] = temp; __syncthreads(); // Find the max of the block through reduction. // At each step half number of threads performing a comparison until only thread 0 remains, with the final result. unsigned int i = blockDim.x / 2; while (i != 0) { if (threadIdx.x < i) threadMax[threadIdx.x] = fmaxf(threadMax[threadIdx.x], threadMax[threadIdx.x + i]); __syncthreads(); i /= 2; } // threadMax[0] now contains the block maximum. __syncthreads(); // The first thread writes the final result to the output array of block maximums. if (threadIdx.x == 0) blockMax[blockIdx.x] = threadMax[0]; } // Final stage of max finding kernels. __global__ void maxSecond(float *blockMax, float* max) { // Perform a reduction on the input array of block maximums. unsigned int i = blockDim.x / 2; while (i != 0) { if (threadIdx.x < i) blockMax[threadIdx.x] = fmaxf(blockMax[threadIdx.x], blockMax[threadIdx.x + i]); __syncthreads(); i /= 2; } __syncthreads(); // Write the final result. if (threadIdx.x == 0) max[0] = blockMax[0]; } ////////////////// // Gauss Kernel // ////////////////// __global__ void gaussKernel(float* in_array, float* out_array, const int rows, const int cols) { // Hardcoded 5x5 Gaussian filter weights. 
float weights[25] { 0.003765, 0.015019, 0.023792, 0.015019, 0.003765, 0.015019, 0.059912, 0.094907, 0.059912, 0.015019, 0.023792, 0.094907, 0.150342, 0.094907, 0.023792, 0.015019, 0.059912, 0.094907, 0.059912, 0.015019, 0.003765, 0.015019, 0.023792, 0.015019, 0.003765 }; __shared__ float filter[25]; if (threadIdx.x == 0) { for (int i = 0; i < 25; i++) { filter[i] = weights[i]; } } __syncthreads(); // Setting x,y coords from thread index int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; // Return if either index is out of bounds. if (x >= cols || y >= rows) return; // Absolute 1-D index. int absIdx = x + y*cols; int filterWidth = 5; // Width of the Gaussian filter, hardcoded to 5. int half = filterWidth / 2; float blur = 0.f; // will contain the blurred value int width = cols - 1; int height = rows - 1; for (int i = -half; i <= half; ++i) // rows { for (int j = -half; j <= half; ++j) // columns { // Clamp filter to the image border int w = min(max(x + j, 0), width); int h = min(max(y + i, 0), height); // Blur is the product of the current pixel value and the weight of that pixel. // Remember that the sum of all weights equals 1, so this is a weighted average of the pixels. int idx = w + cols * h; // current pixel index float value = in_array[idx]; idx = (i + half) * filterWidth + j + half; float weight = filter[idx]; blur += value * weight; } } out_array[absIdx] = blur; }
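// Editor's note: a hedged host-side driver (not in the original source) for the two-stage
// maximum reduction above; names and the grid size are assumptions, and it presumes the
// kernels above are in scope with <cuda_runtime.h> included. The block size must be 256 to
// match `__shared__ float threadMax[256]` in maxFirst, the thread counts must be powers of
// two for the halving loops, and maxFirst reads in_array[index] without a bounds check, so
// n must be at least gridSize*blockSize.
void findDeviceMax(float *d_in, float *d_max, int n) {
	const int blockSize = 256; // fixed by the shared-memory array in maxFirst
	const int gridSize  = 64;  // hypothetical; maxSecond then folds 64 partial maxima
	float *d_blockMax = nullptr;
	cudaMalloc(&d_blockMax, gridSize*sizeof(float));
	maxFirst <<<gridSize, blockSize>>>(d_in, d_blockMax, n); // one partial max per block
	maxSecond<<<1, gridSize>>>(d_blockMax, d_max);           // single block folds the partials
	cudaFree(d_blockMax); // cudaFree synchronizes, so the kernels have finished here
}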
94e16e7201b16d3167a71195e6a888179d43e0f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/book.h" #include "common/cpu_bitmap.h" #define DIM 1024 #define PI 3.1415926535897932f #define THREAD_DIM 16 __global__ void kernel(unsigned char *ptr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y *blockDim.x * gridDim.x; __shared__ float shared[THREAD_DIM][THREAD_DIM]; const float period = 128.0f; shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x * 2.0f * PI / period) + 1.0f) * (sinf(y * 2.0f * PI / period) + 1.0f) / 4.0f; __syncthreads(); ptr[offset*4 + 0] = 0; ptr[offset*4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y]; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 255; } int main(void) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; HANDLE_ERROR( hipMallocManaged(&dev_bitmap, bitmap.image_size()) ); dim3 grids(DIM/THREAD_DIM, DIM/THREAD_DIM); dim3 threads(THREAD_DIM, THREAD_DIM); hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, dev_bitmap); HANDLE_ERROR( hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost) ); bitmap.display_and_exit(); hipFree(dev_bitmap); }
94e16e7201b16d3167a71195e6a888179d43e0f0.cu
#include "cuda.h" #include "common/book.h" #include "common/cpu_bitmap.h" #define DIM 1024 #define PI 3.1415926535897932f #define THREAD_DIM 16 __global__ void kernel(unsigned char *ptr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y *blockDim.x * gridDim.x; __shared__ float shared[THREAD_DIM][THREAD_DIM]; const float period = 128.0f; shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x * 2.0f * PI / period) + 1.0f) * (sinf(y * 2.0f * PI / period) + 1.0f) / 4.0f; __syncthreads(); ptr[offset*4 + 0] = 0; ptr[offset*4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y]; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 255; } int main(void) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; HANDLE_ERROR( cudaMallocManaged(&dev_bitmap, bitmap.image_size()) ); dim3 grids(DIM/THREAD_DIM, DIM/THREAD_DIM); dim3 threads(THREAD_DIM, THREAD_DIM); kernel<<<grids, threads>>>(dev_bitmap); HANDLE_ERROR( cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost) ); bitmap.display_and_exit(); cudaFree(dev_bitmap); }
a0f83f40a4597c9c59ebba7259cfe8550f401318.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Matrix Vector multiplication with copying more data out from one vector than allocated. */ #include <stdbool.h> #include <stdio.h> #include <stdlib.h> //Grid dimension #define B 100 //Block dimension #define T 256 //Array size #define C B*T // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ hipError_t cuErr = call; \ if(hipSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));\ exit(0); \ } \ }while(0) //Host pointer for matrix b, input vector a and result vector c int *a; int *b; int *c; //Device pointer for matrix d_b, input vector d_a and result vector d_c int *d_a; int *d_b; int *d_c; //Initialization and allocation of the host variables int init(){ //Allocating host variables a = (int *) malloc(C*sizeof(int)); b = (int *) malloc(C*C*sizeof(int)); c = (int *) malloc(C*sizeof(int)); //Initialize host values for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ b[j+i*C]=1; } a[i]=1; c[i]=0; } return 0; } //Kernel __global__ void Mult(int* d_a, int* d_b, int* d_c){ int tid = blockDim.x * blockIdx.x + threadIdx.x; for(int j=0; j<C; j++){ d_c[tid]+=d_b[j+tid*C]*d_a[j]; } } //Checking if the values stored in c are correct int check(){ bool test = false; for(int i=0; i<C; i++){ if(c[i]!=C){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true\n" : "false\n"); return 0; } //Initialization of the variables on the GPU int initcuda(){ //Allocation of GPU memory for d_a,d_b,d_c cudaErrorCheck( hipMalloc(&d_a, C*sizeof(int))); cudaErrorCheck( hipMalloc(&d_b, C*C*sizeof(int))); cudaErrorCheck( hipMalloc(&d_c, C*sizeof(int))); //Copying the array a and the matrix b from the host to the array d_a and the matrix d_b on the device cudaErrorCheck( hipMemcpy(d_a,a,C*sizeof(int),hipMemcpyHostToDevice)); cudaErrorCheck( hipMemcpy(d_b,b,C*C*sizeof(int),hipMemcpyHostToDevice)); return 0; } //Main programm int main(){ //Calling the initialization methods init(); initcuda(); //Launch Kernel hipLaunchKernelGGL(( Mult), dim3(B),dim3(T), 0, 0, d_a,d_b,d_c); // Check for errors in kernel launch (e.g. invalid execution configuration paramters) cudaErrorCheck( hipGetLastError()); // Check for errors on the GPU after control is returned to CPU cudaErrorCheck( hipDeviceSynchronize()); //Copying back twice the result d_c from the device to the host array c cudaErrorCheck( hipMemcpy(c,d_c,2*C*sizeof(int),hipMemcpyDeviceToHost)); //Verify result check(); //Freeing GPU memory cudaErrorCheck( hipFree(d_a)); cudaErrorCheck( hipFree(d_b)); cudaErrorCheck( hipFree(d_c)); //Freeing CPU memory free(a); free(b); free(c); return 0; }
a0f83f40a4597c9c59ebba7259cfe8550f401318.cu
/* Matrix Vector multiplication with copying more data out from one vector than allocated. */ #include <stdbool.h> #include <stdio.h> #include <stdlib.h> //Grid dimension #define B 100 //Block dimension #define T 256 //Array size #define C B*T // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ cudaError_t cuErr = call; \ if(cudaSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\ exit(0); \ } \ }while(0) //Host pointer for matrix b, input vector a and result vector c int *a; int *b; int *c; //Device pointer for matrix d_b, input vector d_a and result vector d_c int *d_a; int *d_b; int *d_c; //Initialization and allocation of the host variables int init(){ //Allocating host variables a = (int *) malloc(C*sizeof(int)); b = (int *) malloc(C*C*sizeof(int)); c = (int *) malloc(C*sizeof(int)); //Initialize host values for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ b[j+i*C]=1; } a[i]=1; c[i]=0; } return 0; } //Kernel __global__ void Mult(int* d_a, int* d_b, int* d_c){ int tid = blockDim.x * blockIdx.x + threadIdx.x; for(int j=0; j<C; j++){ d_c[tid]+=d_b[j+tid*C]*d_a[j]; } } //Checking if the values stored in c are correct int check(){ bool test = false; for(int i=0; i<C; i++){ if(c[i]!=C){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true\n" : "false\n"); return 0; } //Initialization of the variables on the GPU int initcuda(){ //Allocation of GPU memory for d_a,d_b,d_c cudaErrorCheck( cudaMalloc(&d_a, C*sizeof(int))); cudaErrorCheck( cudaMalloc(&d_b, C*C*sizeof(int))); cudaErrorCheck( cudaMalloc(&d_c, C*sizeof(int))); //Copying the array a and the matrix b from the host to the array d_a and the matrix d_b on the device cudaErrorCheck( cudaMemcpy(d_a,a,C*sizeof(int),cudaMemcpyHostToDevice)); cudaErrorCheck( cudaMemcpy(d_b,b,C*C*sizeof(int),cudaMemcpyHostToDevice)); return 0; } //Main programm int main(){ //Calling the initialization methods init(); initcuda(); //Launch Kernel Mult<<<B,T>>>(d_a,d_b,d_c); // Check for errors in kernel launch (e.g. invalid execution configuration paramters) cudaErrorCheck( cudaGetLastError()); // Check for errors on the GPU after control is returned to CPU cudaErrorCheck( cudaDeviceSynchronize()); //Copying back twice the result d_c from the device to the host array c cudaErrorCheck( cudaMemcpy(c,d_c,2*C*sizeof(int),cudaMemcpyDeviceToHost)); //Verify result check(); //Freeing GPU memory cudaErrorCheck( cudaFree(d_a)); cudaErrorCheck( cudaFree(d_b)); cudaErrorCheck( cudaFree(d_c)); //Freeing CPU memory free(a); free(b); free(c); return 0; }
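// Editor's note: the 2*C element copy above is the deliberate defect this sample exists to
// demonstrate ("copying more data out from one vector than allocated"); tools such as
// compute-sanitizer report it as an invalid device read past the end of d_c. For reference,
// the corrected transfer (only if the bug were unwanted) would be:
//
//     cudaErrorCheck( cudaMemcpy(c, d_c, C*sizeof(int), cudaMemcpyDeviceToHost) );
//
// Note also that Mult is launched with exactly B*T == C threads, one per output element,
// which is why the kernel itself needs no bounds check.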
952061de042e15a16aff31d0800740c0b3df7e67.hip
// !!! This is a file automatically generated by hipify!!! /* ================================================================== Programmers: Alfredo Peguero Tejada & Douglas Franz A molecular dynamics NVE code for GPU. To compile: nvcc my_file.cu -o my_exe in the rc machines run with, e.g. ./my_exe ================================================================== */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <time.h> #include <hip/hip_runtime.h> #include <iostream> #include <string> #include <sstream> #include <algorithm> #include <iterator> #include <fstream> #include <map> #include "constants.cpp" #include "system.cpp" #include <vector> /* descriptors for single atom in the tree */ typedef struct atomdesc { double px, py, pz, vx, vy, vz, ax, ay, az, fx, fy, fz, charge, mass, LJsig, LJeps; char name[2]; } atom; atom * atom_list; /* list of all data points for GPU */ // These are for an old way of tracking time struct timezone Idunno; struct timeval startTime, endTime; // set a checkpoint and show the (natural) running time in seconds double report_running_time() { long sec_diff, usec_diff; gettimeofday(&endTime, &Idunno); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time: %ld.%06ld s\n", sec_diff, usec_diff); printf("----------------------------------------------\n"); return (double)(sec_diff*1.0 + usec_diff/1000000.0); } __global__ void runTimeStep(atom * atom_list_old, atom * atom_list_new, double ts, int n) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { // first: integrate (Velocity Verlet) //integrate(atom_list_old[i], atom_list_new[i], ts); atom_list_new[i].px = atom_list_old[i].px + atom_list_old[i].vx * ts + 0.5 * atom_list_old[i].ax * ts *ts; atom_list_new[i].py = atom_list_old[i].py + atom_list_old[i].vy * ts + 0.5 * atom_list_old[i].ay * ts *ts; atom_list_new[i].pz = atom_list_old[i].pz + atom_list_old[i].vz * ts + 0.5 * atom_list_old[i].az * ts *ts; // calculate forces // initialize to zero. atom_list_new[i].fx = 0.0; atom_list_new[i].fy = 0.0; atom_list_new[i].fz = 0.0; // loop through pairs for (int j=i+1; j<n; j++) { // check mixing rules double eps = sqrt(atom_list_new[i].LJeps * atom_list_new[j].LJeps); double sig = 0.5*(atom_list_new[i].LJsig + atom_list_new[j].LJsig); // distances etc. 
double dx,dy,dz,rsq,r,ux,uy,uz,fx,fy,fz,ke; dx = atom_list_new[i].px - atom_list_new[j].px; dy = atom_list_new[i].py - atom_list_new[j].py; dz = atom_list_new[i].pz - atom_list_new[j].pz; rsq = dx*dx + dy*dy + dz*dz; r = sqrt(rsq); ux = dx/r; uy = dy/r; uz = dz/r; // LJ force fx = 24*dx*eps*(2*pow(sig,12)*pow(r,-14) - pow(sig,6)*pow(r,-8)); fy = 24*dy*eps*(2*pow(sig,12)*pow(r,-14) - pow(sig,6)*pow(r,-8)); fz = 24*dz*eps*(2*pow(sig,12)*pow(r,-14) - pow(sig,6)*pow(r,-8)); atom_list_new[i].fx += fx; atom_list_new[i].fy += fy; atom_list_new[i].fz += fz; atom_list_new[j].fx -= fx; atom_list_new[j].fy -= fy; atom_list_new[j].fz -= fz; // electrostatic force ke = 8.987551787e9; fx = (ke * (atom_list_new[i].charge * atom_list_new[j].charge)/rsq) * ux; fy = (ke * (atom_list_new[i].charge * atom_list_new[j].charge)/rsq) * uy; fz = (ke * (atom_list_new[i].charge * atom_list_new[j].charge)/rsq) * uz; atom_list_new[i].fx += fx; atom_list_new[i].fy += fy; atom_list_new[i].fz += fz; atom_list_new[j].fx -= fx; atom_list_new[j].fy -= fy; atom_list_new[j].fz -= fz; } } } /* __device__ newToOld(atom * old, atom * new, int n) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { old[i] = new[i]; } } */ void runMD(atom * atom_list, int n, float ts, float tf) { int block_size = 32; // define memory requirements for atoms/histogram datasets. int atoms_size = n * sizeof(atom); // write new device variable pointers atom *d_atom_list_old; // = atom_list; atom *d_atom_list_new; // allocate gpu memory and send data to gpu to old hipMalloc((void**) &d_atom_list_old, atoms_size); hipMemcpy(d_atom_list_old, atom_list, atoms_size, hipMemcpyHostToDevice); // and the new (duplicate) hipMalloc((void**) &d_atom_list_new, atoms_size); //hipMemcpy(d_atom_list_new, atom_list, atoms_size, hipMemcpyHostToDevice); dim3 dimGrid(ceil(n/block_size),1,1); dim3 dimBlock(block_size,1,1); // time it hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); // go diego go for (float ti=0.0; ti <= tf; ti+=ts) { hipLaunchKernelGGL(( runTimeStep), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_atom_list_old, d_atom_list_new, ts, n ); //newToOld<<< dimGrid, dimBlock >>>( d_atom_list_new, d_atom_list_old, n ); } // fetch kernel runtime hipEventRecord ( stop, 0 ); hipEventSynchronize( stop ); float elapsedTime; hipEventElapsedTime( &elapsedTime, start, stop ); printf( "******** Total Running Time of doIt Kernel: %0.5f s ********\n", elapsedTime/1000.0 ); hipEventDestroy( start ); hipEventDestroy( stop ); // all done. Free up device memory. hipFree(d_atom_list_old); hipFree(d_atom_list_new); } void readFile(System &system, atom * atom_list) { //printf("%le",system.constants.kb); string line; ifstream myfile ("test2.dat"); // test2.dat if (myfile.is_open()) { //std::string::size_type sz; // alias of size_t // loop through each line int id = 0; while ( getline (myfile,line) ) { vector<string> lc; istringstream iss(line); //ostream_iterator<string> out_it (cout,","); copy( istream_iterator<string>(iss), istream_iterator<string>(), back_inserter(lc) // "normally" out_it goes here. ); // make the atom from the current line. 
atom ca; ca.name[0] = lc[0].c_str()[0]; ca.name[1] = lc[0].c_str()[1]; ca.px = atof(lc[1].c_str())* system.constants.cA; ca.py = atof(lc[2].c_str())* system.constants.cA; ca.pz = atof(lc[3].c_str())* system.constants.cA; ca.charge = atof(lc[4].c_str()); ca.vx = 0.0; ca.vy = 0.0; ca.vz = 0.0; ca.ax = 0.0; ca.ay = 0.0; ca.az = 0.0; ca.fx = 0.0; ca.fy = 0.0; ca.fz = 0.0; ca.LJsig = system.constants.sigs[lc[0]]; ca.LJeps = system.constants.eps[lc[0]]; ca.mass = system.constants.masses[lc[0]]; //printf("%c%c %le %le %le %f %f %f %f %f %f %f %f %f %f %le %le %le\n",ca.name[0],ca.name[1], ca.px, ca.py, ca.pz, ca.charge, ca.vx, ca.vy, ca.vz, ca.ax, ca.ay, ca.az, ca.fx, ca.fy, ca.fz, ca.LJsig, ca.LJeps, ca.mass); atom_list[id] = ca; id++; } } } //// MAIN ============================================================= int main(int argc, char **argv) { int n = 75; System system; float ts = 1.0e-15; float tf = 100e-15; // variable and memory assignments atom_list = (atom *)malloc(sizeof(atom)*n); //atom_list[200]; // read da file which assigns atoms to atom_list readFile(system, atom_list); // time the entire GPU process. gettimeofday(&startTime, &Idunno); // run the function which calls the kernel, times the kernel, etc. runMD(atom_list, n, ts, tf); // uses same atom list as cpu code // spit back runtime. report_running_time(); return 0; }
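// Editor's note: with the newToOld kernel commented out above, d_atom_list_old is never
// refreshed, so every iteration of the time loop integrates from the same initial state,
// and velocities/accelerations are never updated from the computed forces (a complete
// velocity-Verlet step would also set a = f/m and do v += 0.5*(a_old + a_new)*ts).
// A minimal host-side sketch (an assumption, not the authors' code) that at least advances
// the state is to ping-pong the two device buffers between steps:
//
//     for (float ti = 0.0; ti <= tf; ti += ts) {
//         hipLaunchKernelGGL(( runTimeStep), dim3(dimGrid), dim3(dimBlock), 0, 0,
//                            d_atom_list_old, d_atom_list_new, ts, n );
//         atom *tmp = d_atom_list_old;   // swap old/new for the next step
//         d_atom_list_old = d_atom_list_new;
//         d_atom_list_new = tmp;
//     }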
952061de042e15a16aff31d0800740c0b3df7e67.cu
/* ================================================================== Programmers: Alfredo Peguero Tejada & Douglas Franz A molecular dynamics NVE code for GPU. To compile: nvcc my_file.cu -o my_exe in the rc machines run with, e.g. ./my_exe ================================================================== */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <time.h> #include <cuda.h> #include <iostream> #include <string> #include <sstream> #include <algorithm> #include <iterator> #include <fstream> #include <map> #include "constants.cpp" #include "system.cpp" #include <vector> /* descriptors for single atom in the tree */ typedef struct atomdesc { double px, py, pz, vx, vy, vz, ax, ay, az, fx, fy, fz, charge, mass, LJsig, LJeps; char name[2]; } atom; atom * atom_list; /* list of all data points for GPU */ // These are for an old way of tracking time struct timezone Idunno; struct timeval startTime, endTime; // set a checkpoint and show the (natural) running time in seconds double report_running_time() { long sec_diff, usec_diff; gettimeofday(&endTime, &Idunno); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time: %ld.%06ld s\n", sec_diff, usec_diff); printf("----------------------------------------------\n"); return (double)(sec_diff*1.0 + usec_diff/1000000.0); } __global__ void runTimeStep(atom * atom_list_old, atom * atom_list_new, double ts, int n) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { // first: integrate (Velocity Verlet) //integrate(atom_list_old[i], atom_list_new[i], ts); atom_list_new[i].px = atom_list_old[i].px + atom_list_old[i].vx * ts + 0.5 * atom_list_old[i].ax * ts *ts; atom_list_new[i].py = atom_list_old[i].py + atom_list_old[i].vy * ts + 0.5 * atom_list_old[i].ay * ts *ts; atom_list_new[i].pz = atom_list_old[i].pz + atom_list_old[i].vz * ts + 0.5 * atom_list_old[i].az * ts *ts; // calculate forces // initialize to zero. atom_list_new[i].fx = 0.0; atom_list_new[i].fy = 0.0; atom_list_new[i].fz = 0.0; // loop through pairs for (int j=i+1; j<n; j++) { // check mixing rules double eps = sqrt(atom_list_new[i].LJeps * atom_list_new[j].LJeps); double sig = 0.5*(atom_list_new[i].LJsig + atom_list_new[j].LJsig); // distances etc. 
double dx,dy,dz,rsq,r,ux,uy,uz,fx,fy,fz,ke; dx = atom_list_new[i].px - atom_list_new[j].px; dy = atom_list_new[i].py - atom_list_new[j].py; dz = atom_list_new[i].pz - atom_list_new[j].pz; rsq = dx*dx + dy*dy + dz*dz; r = sqrt(rsq); ux = dx/r; uy = dy/r; uz = dz/r; // LJ force fx = 24*dx*eps*(2*pow(sig,12)*pow(r,-14) - pow(sig,6)*pow(r,-8)); fy = 24*dy*eps*(2*pow(sig,12)*pow(r,-14) - pow(sig,6)*pow(r,-8)); fz = 24*dz*eps*(2*pow(sig,12)*pow(r,-14) - pow(sig,6)*pow(r,-8)); atom_list_new[i].fx += fx; atom_list_new[i].fy += fy; atom_list_new[i].fz += fz; atom_list_new[j].fx -= fx; atom_list_new[j].fy -= fy; atom_list_new[j].fz -= fz; // electrostatic force ke = 8.987551787e9; fx = (ke * (atom_list_new[i].charge * atom_list_new[j].charge)/rsq) * ux; fy = (ke * (atom_list_new[i].charge * atom_list_new[j].charge)/rsq) * uy; fz = (ke * (atom_list_new[i].charge * atom_list_new[j].charge)/rsq) * uz; atom_list_new[i].fx += fx; atom_list_new[i].fy += fy; atom_list_new[i].fz += fz; atom_list_new[j].fx -= fx; atom_list_new[j].fy -= fy; atom_list_new[j].fz -= fz; } } } /* __device__ newToOld(atom * old, atom * new, int n) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { old[i] = new[i]; } } */ void runMD(atom * atom_list, int n, float ts, float tf) { int block_size = 32; // define memory requirements for atoms/histogram datasets. int atoms_size = n * sizeof(atom); // write new device variable pointers atom *d_atom_list_old; // = atom_list; atom *d_atom_list_new; // allocate gpu memory and send data to gpu to old cudaMalloc((void**) &d_atom_list_old, atoms_size); cudaMemcpy(d_atom_list_old, atom_list, atoms_size, cudaMemcpyHostToDevice); // and the new (duplicate) cudaMalloc((void**) &d_atom_list_new, atoms_size); //cudaMemcpy(d_atom_list_new, atom_list, atoms_size, cudaMemcpyHostToDevice); dim3 dimGrid(ceil(n/block_size),1,1); dim3 dimBlock(block_size,1,1); // time it cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); // go diego go for (float ti=0.0; ti <= tf; ti+=ts) { runTimeStep<<< dimGrid, dimBlock >>>( d_atom_list_old, d_atom_list_new, ts, n ); //newToOld<<< dimGrid, dimBlock >>>( d_atom_list_new, d_atom_list_old, n ); } // fetch kernel runtime cudaEventRecord ( stop, 0 ); cudaEventSynchronize( stop ); float elapsedTime; cudaEventElapsedTime( &elapsedTime, start, stop ); printf( "******** Total Running Time of doIt Kernel: %0.5f s ********\n", elapsedTime/1000.0 ); cudaEventDestroy( start ); cudaEventDestroy( stop ); // all done. Free up device memory. cudaFree(d_atom_list_old); cudaFree(d_atom_list_new); } void readFile(System &system, atom * atom_list) { //printf("%le",system.constants.kb); string line; ifstream myfile ("test2.dat"); // test2.dat if (myfile.is_open()) { //std::string::size_type sz; // alias of size_t // loop through each line int id = 0; while ( getline (myfile,line) ) { vector<string> lc; istringstream iss(line); //ostream_iterator<string> out_it (cout,","); copy( istream_iterator<string>(iss), istream_iterator<string>(), back_inserter(lc) // "normally" out_it goes here. ); // make the atom from the current line. 
atom ca; ca.name[0] = lc[0].c_str()[0]; ca.name[1] = lc[0].c_str()[1]; ca.px = atof(lc[1].c_str())* system.constants.cA; ca.py = atof(lc[2].c_str())* system.constants.cA; ca.pz = atof(lc[3].c_str())* system.constants.cA; ca.charge = atof(lc[4].c_str()); ca.vx = 0.0; ca.vy = 0.0; ca.vz = 0.0; ca.ax = 0.0; ca.ay = 0.0; ca.az = 0.0; ca.fx = 0.0; ca.fy = 0.0; ca.fz = 0.0; ca.LJsig = system.constants.sigs[lc[0]]; ca.LJeps = system.constants.eps[lc[0]]; ca.mass = system.constants.masses[lc[0]]; //printf("%c%c %le %le %le %f %f %f %f %f %f %f %f %f %f %le %le %le\n",ca.name[0],ca.name[1], ca.px, ca.py, ca.pz, ca.charge, ca.vx, ca.vy, ca.vz, ca.ax, ca.ay, ca.az, ca.fx, ca.fy, ca.fz, ca.LJsig, ca.LJeps, ca.mass); atom_list[id] = ca; id++; } } } //// MAIN ============================================================= int main(int argc, char **argv) { int n = 75; System system; float ts = 1.0e-15; float tf = 100e-15; // variable and memory assignments atom_list = (atom *)malloc(sizeof(atom)*n); //atom_list[200]; // read da file which assigns atoms to atom_list readFile(system, atom_list); // time the entire GPU process. gettimeofday(&startTime, &Idunno); // run the function which calls the kernel, times the kernel, etc. runMD(atom_list, n, ts, tf); // uses same atom list as cpu code // spit back runtime. report_running_time(); return 0; }
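// Editor's note (not in the original source): the pair force in runTimeStep matches the
// analytic Lennard-Jones gradient. With the mixed parameters eps, sig and
//     U(r) = 4*eps*( (sig/r)^12 - (sig/r)^6 ),
// the x-component of the force on atom i from atom j is
//     Fx = -dU/dr * (dx/r) = 24*eps*( 2*sig^12*r^-14 - sig^6*r^-8 ) * dx,
// which is exactly the fx/fy/fz expressions above. The electrostatic term is Coulomb's law
// with ke = 1/(4*pi*eps0) ~ 8.98755e9 projected onto the unit vector (ux, uy, uz).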
f2838b5d5ab482a118a5e213ed1f7b2186d11052.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GAMER.h" #include "CUPOT.h" #if ( defined GPU && defined GRAVITY ) // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array_New[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real g_Pot_Array_USG[][ USG_NXT_G*USG_NXT_G*USG_NXT_G ], const real g_Flu_Array_USG[][GRA_NIN-1][ PS1*PS1*PS1 ], char g_DE_Array[][ PS1*PS1*PS1 ], const real Gra_Const, const bool P5_Gradient, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const real dt, const real dh, const real MinEint ); #elif ( MODEL == MHD ) #warning : WAIT MHD !!! #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda, const bool ExtPot, const double TimeNew ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL // declare all device pointers extern real (*d_Rho_Array_P )[ RHO_NXT*RHO_NXT*RHO_NXT ]; extern real (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ]; extern real (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ]; extern real (*d_Flu_Array_G )[GRA_NIN][ PS1*PS1*PS1 ]; extern double (*d_Corner_Array_G)[3]; #if ( MODEL == HYDRO || MODEL == MHD ) #ifdef UNSPLIT_GRAVITY extern real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ]; extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ]; #else static real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ] = NULL; static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ] = NULL; #endif #ifdef DUAL_ENERGY extern char (*d_DE_Array_G)[ PS1*PS1*PS1 ]; #else static char (*d_DE_Array_G)[ PS1*PS1*PS1 ] = NULL; #endif #endif // #if ( MODEL == HYDRO || MODEL == MHD ) extern hipStream_t *Stream; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Asyn_PoissonGravitySolver // Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate // the gravitational potential and/or advance the fluid variables by the gravitational // acceleration for a group of patches // // *********************************************************** // ** Asynchronous Function ** // ** ** // ** will return before the execution in GPU is complete ** // *********************************************************** // // Note : a. Use streams for the asychronous memory copy between device and host // b. Prefix "d" : for pointers pointing to the "Device" memory space // Prefix "h" : for pointers pointing to the "Host" memory space // // Parameter : h_Rho_Array : Host array storing the input density // h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation // h_Pot_Array_Out : Host array to store the output potential // h_Flu_Array : Host array to store the fluid variables for the Gravity solver // h_Corner_Array : Host array storing the physical corner coordinates of each patch // h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY // h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY // h_DE_Array : Host array storing the dual-energy status (for both input and output) // NPatchGroup : Number of patch groups evaluated simultaneously by GPU // dt : Time interval to advance solution // dh : Grid size // SOR_Min_Iter : Minimum # of iterations for SOR // SOR_Max_Iter : Maximum # of iterations for SOR // SOR_Omega : Over-relaxation parameter // MG_Max_Iter : Maximum number of iterations for multigrid // MG_NPre_Smooth : Number of pre-smoothing steps for multigrid // MG_NPos_tSmooth : Number of post-smoothing steps for multigrid // MG_Tolerated_Error : Maximum tolerated error for multigrid // Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a) // IntScheme : Interpolation scheme for potential // --> currently supported schemes include // INT_CQUAD : conservative quadratic interpolation // INT_QUAD : quadratic interpolation // P5_Gradient : Use 5-points stencil to evaluate the potential gradient // ELBDM_Eta : Particle mass / Planck constant in ELBDM // ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM // Poisson : true --> invoke the Poisson solver // GraAcc : true --> invoke the Gravity solver // GPU_NStream : Number of CUDA streams for the asynchronous memory copy // GravityType : Types of gravity --> 
self-gravity, external gravity, both // TimeNew : Physical time at the current step (for the external gravity solver) // TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY) // ExtPot : Add the external potential // MinEint : Minimum allowed internal energy (== MIN_PRES / (GAMMA-1)) // // Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda // Useless parameters in ELBDM : P5_Gradient //------------------------------------------------------------------------------------------------------- void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT], const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT], real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT], real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1], const double h_Corner_Array[][3], const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G], const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1], char h_DE_Array [][PS1][PS1][PS1], const int NPatchGroup, const real dt, const real dh, const int SOR_Min_Iter, const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter, const int MG_NPre_Smooth, const int MG_NPost_Smooth, const real MG_Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta, const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int GPU_NStream, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const bool ExtPot, const real MinEint ) { // model-independent constants # if ( POT_SCHEME == SOR ) const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z ); # elif ( POT_SCHEME == MG ) const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 ); # endif const dim3 Gra_Block_Dim( PATCH_SIZE, PATCH_SIZE, GRA_BLOCK_SIZE_Z ); const int NPatch = NPatchGroup*8; # if ( POT_SCHEME == SOR ) const real Poi_Const = Poi_Coeff*dh*dh; const real SOR_Omega_6 = SOR_Omega/6.0; # endif // model-dependent constants # if ( MODEL == HYDRO ) const real Gra_Const = ( P5_Gradient ) ? -dt/(12.0*dh) : -dt/( 2.0*dh); # elif ( MODEL == MHD ) # warning : WAIT MHD !!! # elif ( MODEL == ELBDM ) const real ELBDM_EtaDt = ELBDM_Eta*dt; # else # error : ERROR : unsupported MODEL !! # endif // check # if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 ) # warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !! 
# endif # ifdef GAMER_DEBUG const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z; // minimum number of threads for spatial interpolation if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) ) Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n", Poi_NThread, (POT_NXT-2)*(POT_NXT-2) ); // constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG" # if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG ) if ( Poisson && Poi_NThread < 64 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread ); # endif // constraint in "CUPOT_PoissonSolver_SOR_16to18cube" # if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 ) if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread, RHO_NXT*RHO_NXT/2 ); # endif if ( GraAcc ) { if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) { if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" ); if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" ); } # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) { if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" ); if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" ); } if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" ); if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" ); # endif # ifdef DUAL_ENERGY if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" ); if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" ); # endif } # endif // #ifdef GAMER_DEBUG if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme ); int *NPatch_per_Stream = new int [GPU_NStream]; int *Rho_MemSize = new int [GPU_NStream]; int *Pot_MemSize_In = new int [GPU_NStream]; int *Pot_MemSize_Out = new int [GPU_NStream]; int *Flu_MemSize = new int [GPU_NStream]; int *Corner_MemSize = new int [GPU_NStream]; int *UsedPatch = new int [GPU_NStream]; # ifdef UNSPLIT_GRAVITY int *Pot_USG_MemSize = new int [GPU_NStream]; int *Flu_USG_MemSize = new int [GPU_NStream]; # endif # ifdef DUAL_ENERGY int *DE_MemSize = new int [GPU_NStream]; # endif // set the number of patches in each stream UsedPatch[0] = 0; if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch; else { for (int s=0; s<GPU_NStream-1; s++) { NPatch_per_Stream[s] = NPatch/GPU_NStream; UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s]; } NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1]; } // set the size of data to be transferred into GPU in each stream for (int s=0; s<GPU_NStream; s++) { Rho_MemSize [s] = NPatch_per_Stream[s]*CUBE(RHO_NXT )*sizeof(real); Pot_MemSize_In [s] = NPatch_per_Stream[s]*CUBE(POT_NXT )*sizeof(real); Pot_MemSize_Out[s] = NPatch_per_Stream[s]*CUBE(GRA_NXT )*sizeof(real); Flu_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*GRA_NIN; Corner_MemSize [s] = NPatch_per_Stream[s]*3 *sizeof(double); # ifdef UNSPLIT_GRAVITY Pot_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(USG_NXT_G)*sizeof(real); Flu_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*(GRA_NIN-1); # endif # ifdef DUAL_ENERGY 
DE_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(char); # endif } // a. copy data from host to device //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) { CUDA_CHECK_ERROR( hipMemcpyAsync( d_Rho_Array_P + UsedPatch[s], h_Rho_Array + UsedPatch[s], Rho_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_P_In + UsedPatch[s], h_Pot_Array_In + UsedPatch[s], Pot_MemSize_In[s], hipMemcpyHostToDevice, Stream[s] ) ); } if ( GraAcc ) { if ( ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) && !Poisson ) CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_P_Out + UsedPatch[s], h_Pot_Array_Out + UsedPatch[s], Pot_MemSize_Out[s], hipMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( hipMemcpyAsync( d_Flu_Array_G + UsedPatch[s], h_Flu_Array + UsedPatch[s], Flu_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) CUDA_CHECK_ERROR( hipMemcpyAsync( d_Corner_Array_G + UsedPatch[s], h_Corner_Array + UsedPatch[s], Corner_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_USG_G + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s], Pot_USG_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( hipMemcpyAsync( d_Flu_Array_USG_G + UsedPatch[s], h_Flu_Array_USG + UsedPatch[s], Flu_USG_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); # endif # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( hipMemcpyAsync( d_DE_Array_G + UsedPatch[s], h_DE_Array + UsedPatch[s], DE_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); # endif } // if ( GraAcc ) } // for (int s=0; s<GPU_NStream; s++) // b. execute the kernel //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; // b1. Poisson solver if ( Poisson ) { # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 hipLaunchKernelGGL(( CUPOT_PoissonSolver_SOR_10to14cube) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] , d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # else hipLaunchKernelGGL(( CUPOT_PoissonSolver_SOR_16to18cube) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] , d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # endif // #ifdef USE_PSOLVER_10TO14 ... else ... # elif ( POT_SCHEME == MG ) hipLaunchKernelGGL(( CUPOT_PoissonSolver_MG) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] , d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], dh, MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error, Poi_Coeff, IntScheme ); # else # error : unsupported GPU Poisson solver # endif // POT_SCHEME } // if ( Poisson ) // b2. 
Gravity solver if ( GraAcc ) { # if ( MODEL == HYDRO ) hipLaunchKernelGGL(( CUPOT_HydroGravitySolver) , dim3(NPatch_per_Stream[s]), dim3(Gra_Block_Dim), 0, Stream[s] , d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], d_Pot_Array_USG_G + UsedPatch[s], d_Flu_Array_USG_G + UsedPatch[s], d_DE_Array_G + UsedPatch[s], Gra_Const, P5_Gradient, GravityType, TimeNew, TimeOld, dt, dh, MinEint ); # elif ( MODEL == MHD ) # warning : WAITH MHD !!! # elif ( MODEL == ELBDM ) hipLaunchKernelGGL(( CUPOT_ELBDMGravitySolver) , dim3(NPatch_per_Stream[s]), dim3(Gra_Block_Dim), 0, Stream[s] , d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], ELBDM_EtaDt, dh, ELBDM_Lambda, ExtPot, TimeNew ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL } // if ( GraAcc ) CUDA_CHECK_ERROR( hipGetLastError() ); } // for (int s=0; s<GPU_NStream; s++) // c. copy data from device to host //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) CUDA_CHECK_ERROR( hipMemcpyAsync( h_Pot_Array_Out + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], Pot_MemSize_Out[s], hipMemcpyDeviceToHost, Stream[s] ) ); if ( GraAcc ) { CUDA_CHECK_ERROR( hipMemcpyAsync( h_Flu_Array + UsedPatch[s], d_Flu_Array_G + UsedPatch[s], Flu_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) ); # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( hipMemcpyAsync( h_DE_Array + UsedPatch[s], d_DE_Array_G + UsedPatch[s], DE_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) ); # endif } } // for (int s=0; s<GPU_NStream; s++) delete [] NPatch_per_Stream; delete [] Rho_MemSize; delete [] Pot_MemSize_In; delete [] Pot_MemSize_Out; delete [] Flu_MemSize; delete [] Corner_MemSize; delete [] UsedPatch; # ifdef UNSPLIT_GRAVITY delete [] Pot_USG_MemSize; delete [] Flu_USG_MemSize; # endif # ifdef DUAL_ENERGY delete [] DE_MemSize; # endif } // FUNCTION : CUAPI_Asyn_PoissonGravitySolver #endif // #if ( defined GPU && defined GRAVITY )
f2838b5d5ab482a118a5e213ed1f7b2186d11052.cu
#include "GAMER.h" #include "CUPOT.h" #if ( defined GPU && defined GRAVITY ) // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array_New[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real g_Pot_Array_USG[][ USG_NXT_G*USG_NXT_G*USG_NXT_G ], const real g_Flu_Array_USG[][GRA_NIN-1][ PS1*PS1*PS1 ], char g_DE_Array[][ PS1*PS1*PS1 ], const real Gra_Const, const bool P5_Gradient, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const real dt, const real dh, const real MinEint ); #elif ( MODEL == MHD ) #warning : WAIT MHD !!! #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda, const bool ExtPot, const double TimeNew ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL // declare all device pointers extern real (*d_Rho_Array_P )[ RHO_NXT*RHO_NXT*RHO_NXT ]; extern real (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ]; extern real (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ]; extern real (*d_Flu_Array_G )[GRA_NIN][ PS1*PS1*PS1 ]; extern double (*d_Corner_Array_G)[3]; #if ( MODEL == HYDRO || MODEL == MHD ) #ifdef UNSPLIT_GRAVITY extern real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ]; extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ]; #else static real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ] = NULL; static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ] = NULL; #endif #ifdef DUAL_ENERGY extern char (*d_DE_Array_G)[ PS1*PS1*PS1 ]; #else static char (*d_DE_Array_G)[ PS1*PS1*PS1 ] = NULL; #endif #endif // #if ( MODEL == HYDRO || MODEL == MHD ) extern cudaStream_t *Stream; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Asyn_PoissonGravitySolver // Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate // the gravitational potential and/or advance the fluid variables by the gravitational // acceleration for a group of patches // // *********************************************************** // ** Asynchronous Function ** // ** ** // ** will return before the execution in GPU is complete ** // *********************************************************** // // Note : a. Use streams for the asychronous memory copy between device and host // b. Prefix "d" : for pointers pointing to the "Device" memory space // Prefix "h" : for pointers pointing to the "Host" memory space // // Parameter : h_Rho_Array : Host array storing the input density // h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation // h_Pot_Array_Out : Host array to store the output potential // h_Flu_Array : Host array to store the fluid variables for the Gravity solver // h_Corner_Array : Host array storing the physical corner coordinates of each patch // h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY // h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY // h_DE_Array : Host array storing the dual-energy status (for both input and output) // NPatchGroup : Number of patch groups evaluated simultaneously by GPU // dt : Time interval to advance solution // dh : Grid size // SOR_Min_Iter : Minimum # of iterations for SOR // SOR_Max_Iter : Maximum # of iterations for SOR // SOR_Omega : Over-relaxation parameter // MG_Max_Iter : Maximum number of iterations for multigrid // MG_NPre_Smooth : Number of pre-smoothing steps for multigrid // MG_NPos_tSmooth : Number of post-smoothing steps for multigrid // MG_Tolerated_Error : Maximum tolerated error for multigrid // Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a) // IntScheme : Interpolation scheme for potential // --> currently supported schemes include // INT_CQUAD : conservative quadratic interpolation // INT_QUAD : quadratic interpolation // P5_Gradient : Use 5-points stencil to evaluate the potential gradient // ELBDM_Eta : Particle mass / Planck constant in ELBDM // ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM // Poisson : true --> invoke the Poisson solver // GraAcc : true --> invoke the Gravity solver // GPU_NStream : Number of CUDA streams for the asynchronous memory copy // GravityType : Types of gravity --> 
self-gravity, external gravity, both // TimeNew : Physical time at the current step (for the external gravity solver) // TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY) // ExtPot : Add the external potential // MinEint : Minimum allowed internal energy (== MIN_PRES / (GAMMA-1)) // // Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda // Useless parameters in ELBDM : P5_Gradient //------------------------------------------------------------------------------------------------------- void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT], const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT], real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT], real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1], const double h_Corner_Array[][3], const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G], const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1], char h_DE_Array [][PS1][PS1][PS1], const int NPatchGroup, const real dt, const real dh, const int SOR_Min_Iter, const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter, const int MG_NPre_Smooth, const int MG_NPost_Smooth, const real MG_Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta, const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int GPU_NStream, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const bool ExtPot, const real MinEint ) { // model-independent constants # if ( POT_SCHEME == SOR ) const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z ); # elif ( POT_SCHEME == MG ) const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 ); # endif const dim3 Gra_Block_Dim( PATCH_SIZE, PATCH_SIZE, GRA_BLOCK_SIZE_Z ); const int NPatch = NPatchGroup*8; # if ( POT_SCHEME == SOR ) const real Poi_Const = Poi_Coeff*dh*dh; const real SOR_Omega_6 = SOR_Omega/6.0; # endif // model-dependent constants # if ( MODEL == HYDRO ) const real Gra_Const = ( P5_Gradient ) ? -dt/(12.0*dh) : -dt/( 2.0*dh); # elif ( MODEL == MHD ) # warning : WAIT MHD !!! # elif ( MODEL == ELBDM ) const real ELBDM_EtaDt = ELBDM_Eta*dt; # else # error : ERROR : unsupported MODEL !! # endif // check # if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 ) # warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !! 
# endif # ifdef GAMER_DEBUG const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z; // minimum number of threads for spatial interpolation if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) ) Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n", Poi_NThread, (POT_NXT-2)*(POT_NXT-2) ); // constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG" # if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG ) if ( Poisson && Poi_NThread < 64 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread ); # endif // constraint in "CUPOT_PoissonSolver_SOR_16to18cube" # if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 ) if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread, RHO_NXT*RHO_NXT/2 ); # endif if ( GraAcc ) { if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) { if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" ); if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" ); } # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) { if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" ); if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" ); } if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" ); if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" ); # endif # ifdef DUAL_ENERGY if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" ); if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" ); # endif } # endif // #ifdef GAMER_DEBUG if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme ); int *NPatch_per_Stream = new int [GPU_NStream]; int *Rho_MemSize = new int [GPU_NStream]; int *Pot_MemSize_In = new int [GPU_NStream]; int *Pot_MemSize_Out = new int [GPU_NStream]; int *Flu_MemSize = new int [GPU_NStream]; int *Corner_MemSize = new int [GPU_NStream]; int *UsedPatch = new int [GPU_NStream]; # ifdef UNSPLIT_GRAVITY int *Pot_USG_MemSize = new int [GPU_NStream]; int *Flu_USG_MemSize = new int [GPU_NStream]; # endif # ifdef DUAL_ENERGY int *DE_MemSize = new int [GPU_NStream]; # endif // set the number of patches in each stream UsedPatch[0] = 0; if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch; else { for (int s=0; s<GPU_NStream-1; s++) { NPatch_per_Stream[s] = NPatch/GPU_NStream; UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s]; } NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1]; } // set the size of data to be transferred into GPU in each stream for (int s=0; s<GPU_NStream; s++) { Rho_MemSize [s] = NPatch_per_Stream[s]*CUBE(RHO_NXT )*sizeof(real); Pot_MemSize_In [s] = NPatch_per_Stream[s]*CUBE(POT_NXT )*sizeof(real); Pot_MemSize_Out[s] = NPatch_per_Stream[s]*CUBE(GRA_NXT )*sizeof(real); Flu_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*GRA_NIN; Corner_MemSize [s] = NPatch_per_Stream[s]*3 *sizeof(double); # ifdef UNSPLIT_GRAVITY Pot_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(USG_NXT_G)*sizeof(real); Flu_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*(GRA_NIN-1); # endif # ifdef DUAL_ENERGY 
DE_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(char); # endif } // a. copy data from host to device //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) { CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Rho_Array_P + UsedPatch[s], h_Rho_Array + UsedPatch[s], Rho_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_In + UsedPatch[s], h_Pot_Array_In + UsedPatch[s], Pot_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) ); } if ( GraAcc ) { if ( ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) && !Poisson ) CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_Out + UsedPatch[s], h_Pot_Array_Out + UsedPatch[s], Pot_MemSize_Out[s], cudaMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_G + UsedPatch[s], h_Flu_Array + UsedPatch[s], Flu_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Corner_Array_G + UsedPatch[s], h_Corner_Array + UsedPatch[s], Corner_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_USG_G + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s], Pot_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_USG_G + UsedPatch[s], h_Flu_Array_USG + UsedPatch[s], Flu_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); # endif # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( cudaMemcpyAsync( d_DE_Array_G + UsedPatch[s], h_DE_Array + UsedPatch[s], DE_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); # endif } // if ( GraAcc ) } // for (int s=0; s<GPU_NStream; s++) // b. execute the kernel //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; // b1. Poisson solver if ( Poisson ) { # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 CUPOT_PoissonSolver_SOR_10to14cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> ( d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # else CUPOT_PoissonSolver_SOR_16to18cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> ( d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # endif // #ifdef USE_PSOLVER_10TO14 ... else ... # elif ( POT_SCHEME == MG ) CUPOT_PoissonSolver_MG <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> ( d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], dh, MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error, Poi_Coeff, IntScheme ); # else # error : unsupported GPU Poisson solver # endif // POT_SCHEME } // if ( Poisson ) // b2. 
Gravity solver if ( GraAcc ) { # if ( MODEL == HYDRO ) CUPOT_HydroGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>> ( d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], d_Pot_Array_USG_G + UsedPatch[s], d_Flu_Array_USG_G + UsedPatch[s], d_DE_Array_G + UsedPatch[s], Gra_Const, P5_Gradient, GravityType, TimeNew, TimeOld, dt, dh, MinEint ); # elif ( MODEL == MHD ) # warning : WAITH MHD !!! # elif ( MODEL == ELBDM ) CUPOT_ELBDMGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>> ( d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], ELBDM_EtaDt, dh, ELBDM_Lambda, ExtPot, TimeNew ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL } // if ( GraAcc ) CUDA_CHECK_ERROR( cudaGetLastError() ); } // for (int s=0; s<GPU_NStream; s++) // c. copy data from device to host //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Pot_Array_Out + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], Pot_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) ); if ( GraAcc ) { CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flu_Array + UsedPatch[s], d_Flu_Array_G + UsedPatch[s], Flu_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) ); # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( cudaMemcpyAsync( h_DE_Array + UsedPatch[s], d_DE_Array_G + UsedPatch[s], DE_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) ); # endif } } // for (int s=0; s<GPU_NStream; s++) delete [] NPatch_per_Stream; delete [] Rho_MemSize; delete [] Pot_MemSize_In; delete [] Pot_MemSize_Out; delete [] Flu_MemSize; delete [] Corner_MemSize; delete [] UsedPatch; # ifdef UNSPLIT_GRAVITY delete [] Pot_USG_MemSize; delete [] Flu_USG_MemSize; # endif # ifdef DUAL_ENERGY delete [] DE_MemSize; # endif } // FUNCTION : CUAPI_Asyn_PoissonGravitySolver #endif // #if ( defined GPU && defined GRAVITY )
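The GAMER pair above illustrates the one mechanical change hipify makes to every kernel launch in these files: CUDA's triple-chevron syntax becomes a call to the hipLaunchKernelGGL macro with the same grid, block, shared-memory, and stream arguments. A minimal sketch of the mapping, using a hypothetical kernel that is not part of GAMER:

__global__ void scale(float *v, float a, int n)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;   // one element per thread
    if ( i < n )  v[i] *= a;
}

// CUDA form, as in the *.cu files above:
//    scale<<< grid, block, 0, stream >>>( d_v, 2.0f, n );
// hipified form, as in the *.hip files above:
//    hipLaunchKernelGGL( scale, dim3(grid), dim3(block), 0, stream, d_v, 2.0f, n );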
627e0d66970d800a73163dbec26eaf8e28aa84d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void bcast(int arg) { int laneId = threadIdx.x & 0x1f; int value; if (laneId == 0) // Note unused variable for value = arg; // all threads except lane 0 value = __shfl(value, 0); // Get value from lane 0 if (value != arg) printf(Thread %d failed.\n, threadIdx.x); } void main() { hipLaunchKernelGGL(( bcast), dim3(1), dim3(32) , 0, 0, 1234); hipDeviceSynchronize(); }
627e0d66970d800a73163dbec26eaf8e28aa84d8.cu
#include <stdio.h> __global__ void bcast(int arg) { int laneId = threadIdx.x & 0x1f; int value; if (laneId == 0) // Note unused variable for value = arg; // all threads except lane 0 value = __shfl(value, 0); // Get "value" from lane 0 if (value != arg) printf("Thread %d failed.\n", threadIdx.x); } int main() { bcast<<< 1, 32 >>>(1234); cudaDeviceSynchronize(); }
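The bcast pair uses the pre-CUDA-9 __shfl intrinsic, which is deprecated in favor of the synchronized variant. A sketch of the same lane-0 broadcast with __shfl_sync, where 0xffffffff masks in all 32 lanes of the warp:

#include <stdio.h>

__global__ void bcast_sync(int arg)
{
    int laneId = threadIdx.x & 0x1f;
    int value = (laneId == 0) ? arg : 0;          // only lane 0 carries the payload
    value = __shfl_sync(0xffffffff, value, 0);    // broadcast lane 0's value to the warp
    if (value != arg) printf("Thread %d failed.\n", threadIdx.x);
}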
7507c187d19b0f191b0335c89887262fdc807ef7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zlarfg.cu normal z -> c, Fri Sep 11 18:29:21 2015 @author Mark Gates */ #include "common_magma.h" #include "magma_templates.h" #define COMPLEX // 512 is maximum number of threads for CUDA capability 1.x #define NB 512 // ---------------------------------------- // CUDA kernel for magma_clarfg. // Uses one block of NB (currently 512) threads. // Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ..., // then does parallel sum reduction to get norm-squared. // // Currently setup to use NB threads, no matter how small dx is. // This was slightly faster (5%) than passing n to magma_sum_reduce. // To use number of threads = min( NB, max( 1, n-1 )), pass n as // argument to magma_sum_reduce, rather than as template parameter. __global__ void clarfg_kernel( int n, magmaFloatComplex* dalpha, magmaFloatComplex* dx, int incx, magmaFloatComplex* dtau ) { const int tx = threadIdx.x; __shared__ float swork[ NB ]; // TODO is it faster for each thread to have its own scale (register)? // if so, communicate it via swork[0] __shared__ float sscale; __shared__ magmaFloatComplex sscale2; magmaFloatComplex tmp; // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow if ( tx == 0 ) { tmp = *dalpha; #ifdef COMPLEX swork[tx] = max( fabs(real(tmp)), fabs(imag(tmp)) ); #else swork[tx] = fabs(tmp); #endif } else { swork[tx] = 0; } for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx]; #ifdef COMPLEX swork[tx] = max( swork[tx], max( fabs(real(tmp)), fabs(imag(tmp)) )); #else swork[tx] = max( swork[tx], fabs(tmp) ); #endif } magma_max_reduce< NB >( tx, swork ); if ( tx == 0 ) sscale = swork[0]; __syncthreads(); // sum norm^2 of dx/sscale // dx has length n-1 swork[tx] = 0; if ( sscale > 0 ) { for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx] / sscale; swork[tx] += real(tmp)*real(tmp) + imag(tmp)*imag(tmp); } magma_sum_reduce< NB >( tx, swork ); //magma_sum_reduce( blockDim.x, tx, swork ); } if ( tx == 0 ) { magmaFloatComplex alpha = *dalpha; if ( swork[0] == 0 && imag(alpha) == 0 ) { // H = I *dtau = MAGMA_C_ZERO; } else { // beta = norm( [dalpha, dx] ) float beta; tmp = alpha / sscale; beta = sscale * sqrt( real(tmp)*real(tmp) + imag(tmp)*imag(tmp) + swork[0] ); beta = -copysign( beta, real(alpha) ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_C_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); *dalpha = MAGMA_C_MAKE( beta, 0 ); sscale2 = 1 / (alpha - beta); } } // scale x (if norm was not 0) __syncthreads(); if ( swork[0] != 0 ) { for( int j = tx; j < n-1; j += NB ) { dx[j*incx] *= sscale2; } } } /** Purpose ------- CLARFG generates a complex elementary reflector (Householder matrix) H of order n, such that H * ( alpha ) = ( beta ), H**H * H = I. ( x ) ( 0 ) where alpha and beta are scalars, with beta real and beta = norm([alpha, x]), and x is an (n-1)-element complex vector. H is represented in the form H = I - tau * ( 1 ) * ( 1 v**H ), ( v ) where tau is a complex scalar and v is a complex (n-1)-element vector. Note that H is not Hermitian. If the elements of x are all zero and dalpha is real, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1. Arguments --------- @param[in] n INTEGER The order of the elementary reflector. 
@param[in,out] dalpha COMPLEX* on the GPU. On entry, pointer to the value alpha, i.e., the first entry of the vector. On exit, it is overwritten with the value beta. @param[in,out] dx COMPLEX array, dimension (1+(N-2)*abs(INCX)), on the GPU On entry, the (n-1)-element vector x. On exit, it is overwritten with the vector v. @param[in] incx INTEGER The increment between elements of X. INCX > 0. @param[out] dtau COMPLEX* on the GPU. Pointer to the value tau. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_caux1 ********************************************************************/ extern "C" void magmablas_clarfg_q( magma_int_t n, magmaFloatComplex_ptr dalpha, magmaFloatComplex_ptr dx, magma_int_t incx, magmaFloatComplex_ptr dtau, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); hipLaunchKernelGGL(( clarfg_kernel), dim3(blocks), dim3(threads), 0, queue , n, dalpha, dx, incx, dtau ); } /** @see magmablas_clarfg_q @ingroup magma_caux1 ********************************************************************/ extern "C" void magmablas_clarfg( magma_int_t n, magmaFloatComplex_ptr dalpha, magmaFloatComplex_ptr dx, magma_int_t incx, magmaFloatComplex_ptr dtau ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); hipLaunchKernelGGL(( clarfg_kernel), dim3(blocks), dim3(threads) , 0, 0, n, dalpha, dx, incx, dtau ); }
7507c187d19b0f191b0335c89887262fdc807ef7.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zlarfg.cu normal z -> c, Fri Sep 11 18:29:21 2015 @author Mark Gates */ #include "common_magma.h" #include "magma_templates.h" #define COMPLEX // 512 is maximum number of threads for CUDA capability 1.x #define NB 512 // ---------------------------------------- // CUDA kernel for magma_clarfg. // Uses one block of NB (currently 512) threads. // Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ..., // then does parallel sum reduction to get norm-squared. // // Currently setup to use NB threads, no matter how small dx is. // This was slightly faster (5%) than passing n to magma_sum_reduce. // To use number of threads = min( NB, max( 1, n-1 )), pass n as // argument to magma_sum_reduce, rather than as template parameter. __global__ void clarfg_kernel( int n, magmaFloatComplex* dalpha, magmaFloatComplex* dx, int incx, magmaFloatComplex* dtau ) { const int tx = threadIdx.x; __shared__ float swork[ NB ]; // TODO is it faster for each thread to have its own scale (register)? // if so, communicate it via swork[0] __shared__ float sscale; __shared__ magmaFloatComplex sscale2; magmaFloatComplex tmp; // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow if ( tx == 0 ) { tmp = *dalpha; #ifdef COMPLEX swork[tx] = max( fabs(real(tmp)), fabs(imag(tmp)) ); #else swork[tx] = fabs(tmp); #endif } else { swork[tx] = 0; } for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx]; #ifdef COMPLEX swork[tx] = max( swork[tx], max( fabs(real(tmp)), fabs(imag(tmp)) )); #else swork[tx] = max( swork[tx], fabs(tmp) ); #endif } magma_max_reduce< NB >( tx, swork ); if ( tx == 0 ) sscale = swork[0]; __syncthreads(); // sum norm^2 of dx/sscale // dx has length n-1 swork[tx] = 0; if ( sscale > 0 ) { for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx] / sscale; swork[tx] += real(tmp)*real(tmp) + imag(tmp)*imag(tmp); } magma_sum_reduce< NB >( tx, swork ); //magma_sum_reduce( blockDim.x, tx, swork ); } if ( tx == 0 ) { magmaFloatComplex alpha = *dalpha; if ( swork[0] == 0 && imag(alpha) == 0 ) { // H = I *dtau = MAGMA_C_ZERO; } else { // beta = norm( [dalpha, dx] ) float beta; tmp = alpha / sscale; beta = sscale * sqrt( real(tmp)*real(tmp) + imag(tmp)*imag(tmp) + swork[0] ); beta = -copysign( beta, real(alpha) ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_C_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); *dalpha = MAGMA_C_MAKE( beta, 0 ); sscale2 = 1 / (alpha - beta); } } // scale x (if norm was not 0) __syncthreads(); if ( swork[0] != 0 ) { for( int j = tx; j < n-1; j += NB ) { dx[j*incx] *= sscale2; } } } /** Purpose ------- CLARFG generates a complex elementary reflector (Householder matrix) H of order n, such that H * ( alpha ) = ( beta ), H**H * H = I. ( x ) ( 0 ) where alpha and beta are scalars, with beta real and beta = ±norm([alpha, x]), and x is an (n-1)-element complex vector. H is represented in the form H = I - tau * ( 1 ) * ( 1 v**H ), ( v ) where tau is a complex scalar and v is a complex (n-1)-element vector. Note that H is not Hermitian. If the elements of x are all zero and dalpha is real, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1. Arguments --------- @param[in] n INTEGER The order of the elementary reflector. @param[in,out] dalpha COMPLEX* on the GPU. On entry, pointer to the value alpha, i.e., the first entry of the vector. 
On exit, it is overwritten with the value beta. @param[in,out] dx COMPLEX array, dimension (1+(N-2)*abs(INCX)), on the GPU On entry, the (n-1)-element vector x. On exit, it is overwritten with the vector v. @param[in] incx INTEGER The increment between elements of X. INCX > 0. @param[out] dtau COMPLEX* on the GPU. Pointer to the value tau. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_caux1 ********************************************************************/ extern "C" void magmablas_clarfg_q( magma_int_t n, magmaFloatComplex_ptr dalpha, magmaFloatComplex_ptr dx, magma_int_t incx, magmaFloatComplex_ptr dtau, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); clarfg_kernel<<< blocks, threads, 0, queue >>>( n, dalpha, dx, incx, dtau ); } /** @see magmablas_clarfg_q @ingroup magma_caux1 ********************************************************************/ extern "C" void magmablas_clarfg( magma_int_t n, magmaFloatComplex_ptr dalpha, magmaFloatComplex_ptr dx, magma_int_t incx, magmaFloatComplex_ptr dtau ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); clarfg_kernel<<< blocks, threads >>>( n, dalpha, dx, incx, dtau ); }
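Reading the clarfg kernel back into the notation of its own comment block (a restatement on our part: since beta is real, the code's MAGMA_C_MAKE( (beta - real(alpha))/beta, -imag(alpha)/beta ) collapses to (beta - alpha)/beta, and sscale2 = 1/(alpha - beta) turns x into v):

H \begin{pmatrix} \alpha \\ x \end{pmatrix} = \begin{pmatrix} \beta \\ 0 \end{pmatrix},
\qquad
\beta = -\operatorname{sign}(\operatorname{Re}\alpha)\,\lVert (\alpha, x) \rVert_2,
\qquad
\tau = \frac{\beta - \alpha}{\beta},
\qquad
v = \frac{x}{\alpha - \beta}.

The max-magnitude scan that fills sscale before the norm loop is the standard guard against intermediate under- and overflow when the entries are squared.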
4e8dd2613671dddf3d7e5abdfc60db5e4c550b81.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/assert_impl.cuh" #include <stdio.h> #include <stdint.h> #include <hip/hip_runtime.h> #include "mindspore/core/mindapi/base/type_id.h" #include "include/hip/hip_fp16.h" __device__ __forceinline__ void PrintData(float *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%f ", input[i]); } printf("%f]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(half *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%f ", __half2float(input[i])); } printf("%f]\n", __half2float(input[summarize - 1])); } __device__ __forceinline__ void PrintData(double *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%lf ", input[i]); } printf("%lf]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(int64_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%lld ", input[i]); } printf("%lld]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(int32_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", input[i]); } printf("%d]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(int16_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", static_cast<int32_t>(input[i])); } printf("%d]\n", static_cast<int32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(int8_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", static_cast<int32_t>(input[i])); } printf("%d]\n", static_cast<int32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(uint64_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%lu ", input[i]); } printf("%lu]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(uint32_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%u ", input[i]); } printf("%u]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(uint16_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%u ", static_cast<uint32_t>(input[i])); } printf("%u]\n", static_cast<uint32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(uint8_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%u ", static_cast<uint32_t>(input[i])); } printf("%u]\n", static_cast<uint32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(bool *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", input[i]); } printf("%d]\n", input[summarize - 1]); } __global__ void CalculateAssertKernel(const bool *cond, void **inputs, int *summarizes, int *types, const size_t input_num) { if (cond[0]) { return; } printf("For 'Assert' condition is false.\n"); for (size_t i = 0; i < input_num; i++) { 
printf("input data: ["); switch (types[i]) { case mindspore::kNumberTypeFloat32: PrintData(static_cast<float *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeFloat16: PrintData(static_cast<half *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeFloat64: PrintData(static_cast<double *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt32: PrintData(static_cast<int32_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt64: PrintData(static_cast<int64_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt16: PrintData(static_cast<int16_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt8: PrintData(static_cast<int8_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt32: PrintData(static_cast<uint32_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt64: PrintData(static_cast<uint64_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt16: PrintData(static_cast<uint16_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt8: PrintData(static_cast<uint8_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeBool: PrintData(static_cast<bool *>(inputs[i]), summarizes[i]); break; default: printf("unsupported data type, typeid %d]\n", types[i]); break; } } return; } void AssertKernel(const bool *cond, void **inputs, int *summarizes, int *types, const size_t input_num, const uint32_t device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( CalculateAssertKernel), dim3(1), dim3(1), 0, cuda_stream, cond, inputs, summarizes, types, input_num); return; }
4e8dd2613671dddf3d7e5abdfc60db5e4c550b81.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/assert_impl.cuh" #include <stdio.h> #include <stdint.h> #include <cuda_runtime.h> #include "mindspore/core/mindapi/base/type_id.h" #include "include/cuda_fp16.h" __device__ __forceinline__ void PrintData(float *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%f ", input[i]); } printf("%f]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(half *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%f ", __half2float(input[i])); } printf("%f]\n", __half2float(input[summarize - 1])); } __device__ __forceinline__ void PrintData(double *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%lf ", input[i]); } printf("%lf]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(int64_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%lld ", input[i]); } printf("%lld]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(int32_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", input[i]); } printf("%d]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(int16_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", static_cast<int32_t>(input[i])); } printf("%d]\n", static_cast<int32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(int8_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", static_cast<int32_t>(input[i])); } printf("%d]\n", static_cast<int32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(uint64_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%lu ", input[i]); } printf("%lu]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(uint32_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%u ", input[i]); } printf("%u]\n", input[summarize - 1]); } __device__ __forceinline__ void PrintData(uint16_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%u ", static_cast<uint32_t>(input[i])); } printf("%u]\n", static_cast<uint32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(uint8_t *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%u ", static_cast<uint32_t>(input[i])); } printf("%u]\n", static_cast<uint32_t>(input[summarize - 1])); } __device__ __forceinline__ void PrintData(bool *input, int summarize) { for (int i = 0; i < summarize - 1; i++) { printf("%d ", input[i]); } printf("%d]\n", input[summarize - 1]); } __global__ void CalculateAssertKernel(const bool *cond, void **inputs, int *summarizes, int *types, const size_t input_num) { if (cond[0]) { return; } printf("For 'Assert' condition is false.\n"); for (size_t i = 0; i < input_num; i++) { printf("input data: ["); switch (types[i]) { case 
mindspore::kNumberTypeFloat32: PrintData(static_cast<float *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeFloat16: PrintData(static_cast<half *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeFloat64: PrintData(static_cast<double *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt32: PrintData(static_cast<int32_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt64: PrintData(static_cast<int64_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt16: PrintData(static_cast<int16_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeInt8: PrintData(static_cast<int8_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt32: PrintData(static_cast<uint32_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt64: PrintData(static_cast<uint64_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt16: PrintData(static_cast<uint16_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeUInt8: PrintData(static_cast<uint8_t *>(inputs[i]), summarizes[i]); break; case mindspore::kNumberTypeBool: PrintData(static_cast<bool *>(inputs[i]), summarizes[i]); break; default: printf("unsupported data type, typeid %d]\n", types[i]); break; } } return; } void AssertKernel(const bool *cond, void **inputs, int *summarizes, int *types, const size_t input_num, const uint32_t device_id, cudaStream_t cuda_stream) { CalculateAssertKernel<<<1, 1, 0, cuda_stream>>>(cond, inputs, summarizes, types, input_num); return; }
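AssertKernel expects everything already resident on the device: the condition flag, an array of type-erased tensor pointers, and parallel arrays with the per-tensor summarize counts and MindSpore type ids. A hypothetical stand-alone driver sketch (not MindSpore code; it assumes the header above is included so that AssertKernel and mindspore::kNumberTypeFloat32 are visible, and every other name is invented for the example):

#include <cuda_runtime.h>

void RunAssertExample(cudaStream_t stream) {
  bool h_cond = false;                      // false => kernel prints the tensors
  float h_data[4] = {1.f, 2.f, 3.f, 4.f};   // one float32 tensor to summarize
  int h_summarize = 4;
  int h_type = mindspore::kNumberTypeFloat32;

  bool *d_cond; float *d_data; void **d_inputs; int *d_summarize, *d_type;
  cudaMalloc(&d_cond, sizeof(bool));
  cudaMalloc(&d_data, sizeof(h_data));
  cudaMalloc(&d_inputs, sizeof(void *));
  cudaMalloc(&d_summarize, sizeof(int));
  cudaMalloc(&d_type, sizeof(int));

  cudaMemcpy(d_cond, &h_cond, sizeof(bool), cudaMemcpyHostToDevice);
  cudaMemcpy(d_data, h_data, sizeof(h_data), cudaMemcpyHostToDevice);
  void *h_inputs[1] = {d_data};             // device pointer stored in a device-side array
  cudaMemcpy(d_inputs, h_inputs, sizeof(void *), cudaMemcpyHostToDevice);
  cudaMemcpy(d_summarize, &h_summarize, sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_type, &h_type, sizeof(int), cudaMemcpyHostToDevice);

  AssertKernel(d_cond, d_inputs, d_summarize, d_type, 1, /*device_id=*/0, stream);
  cudaStreamSynchronize(stream);            // flush the device-side printf output
}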
e2193f14c96a3a6d837a38f337868a55ed08f8dc.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime_api.h> int main() { hipError_t error; hipDeviceProp_t dev; int dev_cnt = 0; // return device numbers with compute capability >= 1.0 error = hipGetDeviceCount (&dev_cnt); if(error != hipSuccess) { printf("Error: %s\n", hipGetErrorString(error)); exit(-1); } printf("Number of devices: %d\n",dev_cnt); // Get properties of each device for(int i = 0; i < dev_cnt; i++) { error = hipGetDeviceProperties(&dev, i); if(error != hipSuccess) { printf("Error: %s\n", hipGetErrorString(error)); exit(-1); } printf("\nDevice %d:\n", i); printf("name: %s\n",dev.name); printf("Compute capability %d.%d\n",dev.major, dev.minor); printf("total global memory(KB): %ld\n", dev.totalGlobalMem/1024); printf("shared mem per block: %d\n",dev.sharedMemPerBlock); printf("regs per block: %d\n", dev.regsPerBlock); printf("warp size: %d\n", dev.warpSize); printf("max threads per block: %d\n",dev.maxThreadsPerBlock); printf("max thread dim z:%d y:%d x:%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]); printf("max grid size z:%d y:%d x:%d\n", dev.maxGridSize[0],dev.maxGridSize[1], dev.maxGridSize[2]); printf("clock rate(KHz): %d\n",dev.clockRate); printf("total constant memory (bytes): %ld\n",dev.totalConstMem); printf("multiprocessor count %d\n",dev.multiProcessorCount); printf("integrated: %d\n",dev.integrated); printf("async engine count: %d\n",dev.asyncEngineCount); printf("memory bus width: %d\n",dev.memoryBusWidth); printf("memory clock rate (KHz): %d\n",dev.memoryClockRate); printf("L2 cache size (bytes): %d\n", dev.l2CacheSize); printf("max threads per SM: %d\n", dev.maxThreadsPerMultiProcessor); printf("Texture alignment: %d\n", dev.textureAlignment); } }
e2193f14c96a3a6d837a38f337868a55ed08f8dc.cu
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <cuda_profiler_api.h> int main() { cudaError_t error; cudaDeviceProp dev; int dev_cnt = 0; // return device numbers with compute capability >= 1.0 error = cudaGetDeviceCount (&dev_cnt); if(error != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(error)); exit(-1); } printf("Number of devices: %d\n",dev_cnt); // Get properties of each device for(int i = 0; i < dev_cnt; i++) { error = cudaGetDeviceProperties(&dev, i); if(error != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(error)); exit(-1); } printf("\nDevice %d:\n", i); printf("name: %s\n",dev.name); printf("Compute capability %d.%d\n",dev.major, dev.minor); printf("total global memory(KB): %ld\n", dev.totalGlobalMem/1024); printf("shared mem per block: %d\n",dev.sharedMemPerBlock); printf("regs per block: %d\n", dev.regsPerBlock); printf("warp size: %d\n", dev.warpSize); printf("max threads per block: %d\n",dev.maxThreadsPerBlock); printf("max thread dim z:%d y:%d x:%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]); printf("max grid size z:%d y:%d x:%d\n", dev.maxGridSize[0],dev.maxGridSize[1], dev.maxGridSize[2]); printf("clock rate(KHz): %d\n",dev.clockRate); printf("total constant memory (bytes): %ld\n",dev.totalConstMem); printf("multiprocessor count %d\n",dev.multiProcessorCount); printf("integrated: %d\n",dev.integrated); printf("async engine count: %d\n",dev.asyncEngineCount); printf("memory bus width: %d\n",dev.memoryBusWidth); printf("memory clock rate (KHz): %d\n",dev.memoryClockRate); printf("L2 cache size (bytes): %d\n", dev.l2CacheSize); printf("max threads per SM: %d\n", dev.maxThreadsPerMultiProcessor); printf("Texture alignment: %d\n", dev.textureAlignment); } }
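Both versions of the device-query program repeat the same check-print-exit block after every runtime API call; the usual refactor is a small macro. A sketch (the CHECK_CUDA name is ours, not part of the file above):

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Evaluates the call once, prints the error string, and aborts on failure.
#define CHECK_CUDA(call)                                          \
    do {                                                          \
        cudaError_t err_ = (call);                                \
        if (err_ != cudaSuccess) {                                \
            printf("Error: %s\n", cudaGetErrorString(err_));      \
            exit(-1);                                             \
        }                                                         \
    } while (0)

// Usage:  CHECK_CUDA( cudaGetDeviceCount(&dev_cnt) );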
0add8ef885de0f1e1f1d44ae7fbe9c73e65331cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { DivRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, x->numel()); for_range(functor); } }; template <> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, platform::float16> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { auto size = x->numel(); dim3 grid_size = dim3(((size + 1) / 2 + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); const half* x2 = reinterpret_cast<const half*>(x->data<platform::float16>()); const half* y2 = reinterpret_cast<const half*>(y->data<platform::float16>()); half* z2 = reinterpret_cast<half*>(z->data<platform::float16>()); hipLaunchKernelGGL(( SameDimsElemwiseDivCUDAKernel), dim3(grid_size), dim3(block_size), 0, ctx.template device_context<platform::CUDADeviceContext>().stream(), x2, y2, z2, size); } }; template <typename T> static __global__ void SimpleElemwiseDivGradCUDAKernel(const T* x, const T* y, const T* out, const T* dout, int64_t size, T* dx, T* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { T o = dout[col]; dx[col] = o / y[col]; dy[col] = -o * out[col] / y[col]; col += blockDim.x * gridDim.x; } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_div_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); auto size = x->numel(); dim3 grid_size = dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); hipLaunchKernelGGL(( SimpleElemwiseDivGradCUDAKernel< T>), dim3(grid_size), dim3(block_size), 0, ctx.template device_context<plat::CUDADeviceContext>().stream(), x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()), 
dy->mutable_data<T>(ctx.GetPlace())); } } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( elementwise_div, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad_grad, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>);
0add8ef885de0f1e1f1d44ae7fbe9c73e65331cc.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { DivRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, x->numel()); for_range(functor); } }; template <> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, platform::float16> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { auto size = x->numel(); dim3 grid_size = dim3(((size + 1) / 2 + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); const half* x2 = reinterpret_cast<const half*>(x->data<platform::float16>()); const half* y2 = reinterpret_cast<const half*>(y->data<platform::float16>()); half* z2 = reinterpret_cast<half*>(z->data<platform::float16>()); SameDimsElemwiseDivCUDAKernel<<< grid_size, block_size, 0, ctx.template device_context<platform::CUDADeviceContext>().stream()>>>( x2, y2, z2, size); } }; template <typename T> static __global__ void SimpleElemwiseDivGradCUDAKernel(const T* x, const T* y, const T* out, const T* dout, int64_t size, T* dx, T* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { T o = dout[col]; dx[col] = o / y[col]; dy[col] = -o * out[col] / y[col]; col += blockDim.x * gridDim.x; } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_div_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); auto size = x->numel(); dim3 grid_size = dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); SimpleElemwiseDivGradCUDAKernel< T><<<grid_size, block_size, 0, ctx.template device_context<plat::CUDADeviceContext>().stream()>>>( x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace())); } } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( elementwise_div, 
ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad_grad, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>);
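The float16 specialization in the Paddle pair sizes its grid with (size + 1) / 2 because the underlying kernel can process two half values per thread through the packed half2 type. A minimal sketch of that vectorization idea (this is not Paddle's SameDimsElemwiseDivCUDAKernel; it assumes compute capability 5.3+ for half arithmetic and an even element count, with n2 = size / 2):

#include <cuda_fp16.h>

__global__ void HalfDivPacked(const half2 *x, const half2 *y, half2 *z, int n2)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n2) z[i] = __h2div(x[i], y[i]);   // two half divisions per thread
}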
1b3efb6f600fa5d8f53d66d2a5f2cd24d468bc76.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>

#define TAM 10
#define N 2
#define T 6

__global__ void sum_matrix(int** dd_mat_a, int** dd_mat_b, int** dd_mat_c, int n, int m){
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    int y = threadIdx.y + blockIdx.y*blockDim.y;
    if( y<n && x<m ){ // TODO: review
        //*(*(dd_mat_a+y)+x)=-9;
        *(*(dd_mat_c+y)+x) = *(*(dd_mat_a+y)+x) + *(*(dd_mat_b+y)+x);
    }
}

void create3(int*** mat, int n, int m){
    *mat = (int**)malloc(sizeof(int*)*n);
    (*mat)[0] = (int*)malloc(sizeof(int)*n*m);
    int i;
    for(i=1;i<n;i++){
        (*mat)[i] = (*mat)[0]+i*m;
    }
}

void fill(int** mat, int n, int m){
    int i,j;
    for(i=0; i<n ;i++){
        for(j=0; j<m ;j++)
            mat[i][j] = rand()%2;
    }
}

void fill_value(int** mat, int n, int m, int value=0){
    int i,j;
    for(i=0;i<n;i++)
        for(j=0;j<m;j++)
            mat[i][j] = value;
}

void print(int** mat, int n, int m){
    int i,j;
    for(i=0; i<n ;i++){
        for(j=0; j<m ;j++)
            printf("%d", mat[i][j]);
        printf("\n");
    }
}

void create5(int**& mat, int**& d_mat, int**& dd_mat, int n, int m, int fillValue=-1){
    int i;
    mat = (int**)malloc(sizeof(int*)*n);
    mat[0] = (int*)malloc(sizeof(int)*n*m);
    for(i=1;i<n;i++){
        mat[i] = mat[i-1]+m;
    }
    if(fillValue==-1){
        fill(mat,n,m);
    }
    else{
        fill_value(mat,n,m,fillValue);
    }
    int size_row = sizeof(int*) * n;
    int size_col = sizeof(int ) * m;
    d_mat = (int**) malloc(size_row);
    hipMalloc((void**)& d_mat[0], sizeof(int) * m * n );
    hipMemcpy(d_mat[0], mat[0], sizeof(int) * m * n , hipMemcpyHostToDevice);
    for(i=1;i<n;i++){
        d_mat[i] = (d_mat[i-1]+m);
    }
    hipMalloc((void***)&dd_mat, size_row);
    hipMemcpy(dd_mat, d_mat, size_row, hipMemcpyHostToDevice);
}

int main(){
    if(N*T<TAM){
        printf("the grid does not cover the matrix\n");
        return 0;
    }
    int n = TAM;
    int m = TAM;
    int** mat_a; int** d_mat_a; int** dd_mat_a;
    int** mat_b; int** d_mat_b; int** dd_mat_b;
    int** mat_c; int** d_mat_c; int** dd_mat_c;
    create5(mat_a,d_mat_a,dd_mat_a,n,m);
    create5(mat_b,d_mat_b,dd_mat_b,n,m);
    create5(mat_c,d_mat_c,dd_mat_c,n,m,0);
    int i;
    int size_row = sizeof(int*) * n;
    int size_col = sizeof(int ) * m;
    /*
    int** mat_a;
    create3(&mat_a,n,m);
    fill(mat_a,n,m);
    int **d_mat_a;
    int **dd_mat_a;
    d_mat_a = (int**) malloc(size_row);
    hipMalloc((void**)& d_mat_a[0], sizeof(int) * m * n );
    hipMemcpy(d_mat_a[0], mat_a[0], sizeof(int) * m * n , hipMemcpyHostToDevice);
    for(i=1;i<n;i++){
        d_mat_a[i]=(d_mat_a[i-1]+m);
    }
    hipMalloc((void***)&dd_mat_a,size_row);
    hipMemcpy(dd_mat_a,d_mat_a,size_row,hipMemcpyHostToDevice);
    */
    print(mat_a,n,m);
    printf("//////////////////\n");
    print(mat_b,n,m);
    printf("//////////////////\n");
    print(mat_c,n,m);
    printf("//////////////////\n");
    printf("//////////////////\n");
    dim3 grid(N,N,1);
    dim3 blockNum(T,T,1);
    hipLaunchKernelGGL(( sum_matrix), dim3(grid), dim3(blockNum), 0, 0, dd_mat_a, dd_mat_b, dd_mat_c, n, m);
    for(i=0;i<n;i++){
        hipMemcpy(mat_c[i], d_mat_c[i], size_col, hipMemcpyDeviceToHost);
    }
    printf("//////////////////\n");
    printf("//////////////////\n");
    print(mat_a,n,m);
    printf("//////////////////\n");
    print(mat_b,n,m);
    printf("//////////////////\n");
    print(mat_c,n,m);
    return 0;
}
1b3efb6f600fa5d8f53d66d2a5f2cd24d468bc76.cu
#include <stdlib.h>
#include <stdio.h>

#define TAM 10
#define N 2
#define T 6

__global__ void sum_matrix(int** dd_mat_a, int** dd_mat_b, int** dd_mat_c, int n, int m){
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    int y = threadIdx.y + blockIdx.y*blockDim.y;
    if( y<n && x<m ){ // TODO: review
        //*(*(dd_mat_a+y)+x)=-9;
        *(*(dd_mat_c+y)+x) = *(*(dd_mat_a+y)+x) + *(*(dd_mat_b+y)+x);
    }
}

void create3(int*** mat, int n, int m){
    *mat = (int**)malloc(sizeof(int*)*n);
    (*mat)[0] = (int*)malloc(sizeof(int)*n*m);
    int i;
    for(i=1;i<n;i++){
        (*mat)[i] = (*mat)[0]+i*m;
    }
}

void fill(int** mat, int n, int m){
    int i,j;
    for(i=0; i<n ;i++){
        for(j=0; j<m ;j++)
            mat[i][j] = rand()%2;
    }
}

void fill_value(int** mat, int n, int m, int value=0){
    int i,j;
    for(i=0;i<n;i++)
        for(j=0;j<m;j++)
            mat[i][j] = value;
}

void print(int** mat, int n, int m){
    int i,j;
    for(i=0; i<n ;i++){
        for(j=0; j<m ;j++)
            printf("%d", mat[i][j]);
        printf("\n");
    }
}

void create5(int**& mat, int**& d_mat, int**& dd_mat, int n, int m, int fillValue=-1){
    int i;
    mat = (int**)malloc(sizeof(int*)*n);
    mat[0] = (int*)malloc(sizeof(int)*n*m);
    for(i=1;i<n;i++){
        mat[i] = mat[i-1]+m;
    }
    if(fillValue==-1){
        fill(mat,n,m);
    }
    else{
        fill_value(mat,n,m,fillValue);
    }
    int size_row = sizeof(int*) * n;
    int size_col = sizeof(int ) * m;
    d_mat = (int**) malloc(size_row);
    cudaMalloc((void**)& d_mat[0], sizeof(int) * m * n );
    cudaMemcpy(d_mat[0], mat[0], sizeof(int) * m * n , cudaMemcpyHostToDevice);
    for(i=1;i<n;i++){
        d_mat[i] = (d_mat[i-1]+m);
    }
    cudaMalloc((void***)&dd_mat, size_row);
    cudaMemcpy(dd_mat, d_mat, size_row, cudaMemcpyHostToDevice);
}

int main(){
    if(N*T<TAM){
        printf("the grid does not cover the matrix\n");
        return 0;
    }
    int n = TAM;
    int m = TAM;
    int** mat_a; int** d_mat_a; int** dd_mat_a;
    int** mat_b; int** d_mat_b; int** dd_mat_b;
    int** mat_c; int** d_mat_c; int** dd_mat_c;
    create5(mat_a,d_mat_a,dd_mat_a,n,m);
    create5(mat_b,d_mat_b,dd_mat_b,n,m);
    create5(mat_c,d_mat_c,dd_mat_c,n,m,0);
    int i;
    int size_row = sizeof(int*) * n;
    int size_col = sizeof(int ) * m;
    /*
    int** mat_a;
    create3(&mat_a,n,m);
    fill(mat_a,n,m);
    int **d_mat_a;
    int **dd_mat_a;
    d_mat_a = (int**) malloc(size_row);
    cudaMalloc((void**)& d_mat_a[0], sizeof(int) * m * n );
    cudaMemcpy(d_mat_a[0], mat_a[0], sizeof(int) * m * n , cudaMemcpyHostToDevice);
    for(i=1;i<n;i++){
        d_mat_a[i]=(d_mat_a[i-1]+m);
    }
    cudaMalloc((void***)&dd_mat_a,size_row);
    cudaMemcpy(dd_mat_a,d_mat_a,size_row,cudaMemcpyHostToDevice);
    */
    print(mat_a,n,m);
    printf("//////////////////\n");
    print(mat_b,n,m);
    printf("//////////////////\n");
    print(mat_c,n,m);
    printf("//////////////////\n");
    printf("//////////////////\n");
    dim3 grid(N,N,1);
    dim3 blockNum(T,T,1);
    sum_matrix<<<grid,blockNum>>>(dd_mat_a,dd_mat_b,dd_mat_c,n,m);
    for(i=0;i<n;i++){
        cudaMemcpy(mat_c[i], d_mat_c[i], size_col, cudaMemcpyDeviceToHost);
    }
    printf("//////////////////\n");
    printf("//////////////////\n");
    print(mat_a,n,m);
    printf("//////////////////\n");
    print(mat_b,n,m);
    printf("//////////////////\n");
    print(mat_c,n,m);
    return 0;
}
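Both versions of this example build a table of device row pointers (d_mat on the host, dd_mat on the device) so the kernel can index mat[y][x]. A flat one-dimensional layout avoids the second allocation and the pointer fix-up loop entirely; a minimal sketch, assuming the same row-major n x m matrices (the kernel name is hypothetical):

// Flat-layout alternative: one allocation per matrix, index = y*m + x.
__global__ void sum_matrix_flat(const int* a, const int* b, int* c,
                                int n, int m) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;
  if (y < n && x < m) c[y * m + x] = a[y * m + x] + b[y * m + x];
}

// Host side (error handling elided): a single cudaMalloc/cudaMemcpy per
// matrix replaces the per-row pointer fix-up in create5, e.g.
//   int* d_a; cudaMalloc((void**)&d_a, n * m * sizeof(int));
//   cudaMemcpy(d_a, h_a, n * m * sizeof(int), cudaMemcpyHostToDevice);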
d1e48e5dd53fd915b1b66a49a6a4688d9cc5cd06.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/conv_transpose_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/padding.h" #include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; static constexpr size_t kConvCUDNNWorkspaceLimitBytes = 1024 * 1024 * 1024; template <typename T, int D> static void DataTranspose(const framework::ExecutionContext& ctx, const Tensor* input, Tensor* output, const std::vector<int>& axis, int flag = 0) { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::Transpose<platform::CUDADeviceContext, T, D> transpose; auto in_dims = input->dims(); std::vector<int64_t> input_transpose_vec; for (size_t i = 0; i < axis.size(); ++i) { if (flag == 0) input_transpose_vec.push_back(in_dims[axis[i]]); else input_transpose_vec.push_back(in_dims[i]); } framework::DDim input_transpose_dims( framework::make_ddim(input_transpose_vec)); output->mutable_data<T>(input_transpose_dims, ctx.GetPlace()); transpose(dev_ctx, *input, output, axis); } template <typename T> class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, "It must use CUDAPlace."); auto* input = ctx.Input<Tensor>("Input"); auto* filter = ctx.Input<Tensor>("Filter"); auto* output = ctx.Output<Tensor>("Output"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); // cudnn v5 does not support dilations std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); int user_workspace_size = ctx.Attr<int>("workspace_size_MB"); const T* filter_data = filter->data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_format"); const paddle::operators::DataLayout data_layout = (data_layout_str != "NHWC" ? 
DataLayout::kNCHW : DataLayout::kNHWC);
    // if channel_last, transpose to channel_first
    Tensor input_transpose;
    std::vector<int> input_vec = framework::vectorize<int>(input->dims());
    std::vector<int> output_vec = framework::vectorize<int>(output->dims());
    if (data_layout == DataLayout::kNHWC) {
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 3, 1, 2};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output->dims()[axis[i]];
        }
        DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 4, 1, 2, 3};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output->dims()[axis[i]];
        }
        DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
      }
    } else {
      input_transpose = *input;
    }

    // update padding and dilation
    auto in_dims = input_transpose.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);

    std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
    Tensor transformed_input;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = input_transpose.dims()[0];
      new_input_shape_vec[1] = input_transpose.dims()[1];
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            input_transpose.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(
          framework::make_ddim(new_input_shape_vec));
      transformed_input.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_input =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      const int rank = input_transpose.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, input_transpose, pad_value, &transformed_input);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, input_transpose, pad_value, &transformed_input);
        } break;
        default:
          PADDLE_ENFORCE_EQ(
              rank == 4 || rank == 5, true,
              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
      }
    } else {
      transformed_input = input_transpose;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }

    std::vector<int64_t> starts(data_dim, 0);
    std::vector<int64_t> ends(data_dim, 0);
    std::vector<int64_t> axes(data_dim, 0);
    for (size_t i = 0; i < data_dim; ++i) {
      starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
      ends[i] = starts[i] + output_vec[i + 2];
      axes[i] = i + 2;
    }

    const T* input_data = transformed_input.data<T>();
    input_vec =
framework::vectorize<int>(transformed_input.dims()); std::vector<int> transformed_output_vec = output_vec; for (size_t i = 0; i < data_dim; ++i) { transformed_output_vec[i + 2] = output_vec[i + 2] + (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] - 2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1]; } Tensor transformed_output; if (!is_sys_pad) { DDim transformed_output_shape( framework::make_ddim(transformed_output_vec)); transformed_output.mutable_data<T>(transformed_output_shape, ctx.GetPlace()); } else { output->mutable_data<T>(ctx.GetPlace()); transformed_output.ShareDataWith(*output); transformed_output.Resize(framework::make_ddim(transformed_output_vec)); } T* transformed_output_data = transformed_output.data<T>(); // ------------------- cudnn descriptors --------------------- ScopedTensorDescriptor input_desc; ScopedTensorDescriptor output_desc; ScopedFilterDescriptor filter_desc; ScopedConvolutionDescriptor conv_desc; DataLayout layout; if (strides.size() == 2U) { layout = DataLayout::kNCHW; } else { layout = DataLayout::kNCDHW; } // (N, M, H, W) or (N, M, D, H, W) cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(layout, input_vec, groups); // (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w) cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(layout, transformed_output_vec, groups); // (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w) cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>( layout, framework::vectorize<int>(filter->dims()), groups); cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor<T>(padding_common, strides, dilations); // ------------------- cudnn conv workspace --------------------- size_t workspace_size_in_bytes; // final workspace to allocate. size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes; if (user_workspace_size > 0) { workspace_size_limit = user_workspace_size * 1024 * 1024; } // ------------------- cudnn conv algorithm --------------------- cudnnConvolutionBwdDataAlgo_t algo; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); // Get the algorithm CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, // dxDesc: Handle to the previously initialized output tensor // descriptor. 
cudnn_output_desc, CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &algo)); if (algo == 0 && FLAGS_cudnn_deterministic) { algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(1); } // get workspace size able to allocate CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); // ------------------- cudnn conv transpose forward --------------------- int input_offset = transformed_input.numel() / transformed_input.dims()[0] / groups; int output_offset = transformed_output.numel() / transformed_output.dims()[0] / groups; int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; auto workspace_handle = dev_ctx.cudnn_workspace_handle(); for (int g = 0; g < groups; g++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, cudnn_filter_desc, filter_data + filter_offset * g, cudnn_input_desc, input_data + input_offset * g, cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_output_desc, transformed_output_data + output_offset * g)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } if (!is_sys_pad && strides.size() == 2U) { Slice<paddle::platform::CUDADeviceContext, T, 4>( ctx, &transformed_output, output, starts, ends, axes); } else if (!is_sys_pad && strides.size() == 3U) { Slice<paddle::platform::CUDADeviceContext, T, 5>( ctx, &transformed_output, output, starts, ends, axes); } if (data_layout == DataLayout::kNHWC) { Tensor output_transpose; Tensor output_nchw; output_nchw.ShareDataWith(*output); output_nchw.Resize(framework::make_ddim(output_vec)); if (strides.size() == 2U) { std::vector<int> axis = {0, 2, 3, 1}; DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis); *output = output_transpose; } else if (strides.size() == 3U) { std::vector<int> axis = {0, 2, 3, 4, 1}; DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis); *output = output_transpose; } } } }; template <typename T> class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); auto input = ctx.Input<Tensor>("Input"); auto filter = ctx.Input<Tensor>("Filter"); auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); const T* filter_data = filter->data<T>(); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); // cudnn v5 does not support dilations std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); int user_workspace_size = ctx.Attr<int>("workspace_size_MB"); const std::string data_layout_str = ctx.Attr<std::string>("data_format"); const paddle::operators::DataLayout data_layout = (data_layout_str != "NHWC" ? 
DataLayout::kNCHW : DataLayout::kNHWC);
    // if channel_last, transpose to channel_first
    Tensor input_transpose;
    Tensor output_grad_transpose;
    std::vector<int> input_vec = framework::vectorize<int>(input->dims());
    std::vector<int> output_vec =
        framework::vectorize<int>(output_grad->dims());
    if (data_layout == DataLayout::kNHWC) {
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 3, 1, 2};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output_grad->dims()[axis[i]];
        }
        DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
        DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis);
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 4, 1, 2, 3};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output_grad->dims()[axis[i]];
        }
        DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
        DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis);
      }
    } else {
      input_transpose = *input;
      output_grad_transpose = *output_grad;
    }

    // update padding and dilation
    auto in_dims = input_transpose.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);

    std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
    Tensor transformed_output_grad;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_output_grad_shape_vec(data_dim + 2);
      new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0];
      new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1];
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_output_grad_shape_vec[i + 2] =
            output_grad_transpose.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_output_grad_shape(
          framework::make_ddim(new_output_grad_shape_vec));
      transformed_output_grad.Resize(new_output_grad_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_output_grad =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_output_grad_shape, dev_ctx);
      const int rank = input_transpose.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, output_grad_transpose, pad_value,
              &transformed_output_grad);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, output_grad_transpose, pad_value,
              &transformed_output_grad);
        } break;
        default:
          PADDLE_ENFORCE_EQ(
              rank == 4 || rank == 5, true,
              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
      }
    } else {
      transformed_output_grad = output_grad_transpose;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }
    const T*
input_data = input_transpose.data<T>();
    const T* output_grad_data = transformed_output_grad.data<T>();
    output_vec = framework::vectorize<int>(transformed_output_grad.dims());

    // ------------------- cudnn descriptors ---------------------
    ScopedTensorDescriptor input_desc;
    ScopedTensorDescriptor output_desc;
    ScopedFilterDescriptor filter_desc;
    ScopedConvolutionDescriptor conv_desc;
    DataLayout layout;

    if (strides.size() == 2U) {
      layout = DataLayout::kNCHW;
    } else {
      layout = DataLayout::kNCDHW;
    }

    // Input: (N, M, H, W) or (N, M, D, H, W)
    cudnnTensorDescriptor_t cudnn_input_desc =
        input_desc.descriptor<T>(layout, input_vec, groups);
    // Output: (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
    cudnnTensorDescriptor_t cudnn_output_desc =
        output_desc.descriptor<T>(layout, output_vec, groups);
    // Filter (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w)
    cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
        layout, framework::vectorize<int>(filter->dims()), groups);

    cudnnConvolutionDescriptor_t cudnn_conv_desc =
        conv_desc.descriptor<T>(padding_common, strides, dilations);

    // ------------------- cudnn backward algorithm ---------------------
    cudnnConvolutionFwdAlgo_t data_algo;
    cudnnConvolutionBwdFilterAlgo_t filter_algo;
    size_t bwd_filter_ws_size, fwd_ws_size;
    size_t workspace_size_in_bytes = 0;
    size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes;
    if (user_workspace_size > 0) {
      workspace_size_limit = user_workspace_size * 1024 * 1024;
    }

    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto handle = dev_ctx.cudnn_handle();
    if (input_grad) {
      // choose backward algorithm for data
      CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm(
          handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc,
          cudnn_input_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
          workspace_size_limit, &data_algo));
      CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
          handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc,
          cudnn_input_desc, data_algo, &fwd_ws_size));
      workspace_size_in_bytes = std::max(workspace_size_in_bytes, fwd_ws_size);
    }

    if (filter_grad) {
      // choose backward algorithm for filter
      CUDNN_ENFORCE(
          platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm(
              handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc,
              cudnn_filter_desc,
              CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
              workspace_size_limit, &filter_algo));
      // get workspace for backwards filter algorithm
      CUDNN_ENFORCE(
          platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize(
              handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc,
              cudnn_filter_desc, filter_algo, &bwd_filter_ws_size));
      workspace_size_in_bytes =
          std::max(workspace_size_in_bytes, bwd_filter_ws_size);
    }

    // ------------------- cudnn conv backward data ---------------------
    // FIXME(typhoonzero): template type T may not be the same as cudnn call.
    int input_offset = input->numel() / input->dims()[0] / groups;
    int output_grad_offset = transformed_output_grad.numel() /
                             transformed_output_grad.dims()[0] / groups;
    int filter_offset = filter->numel() / groups;
    T alpha = 1.0f, beta = 0.0f;
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    if (input_grad) {
      T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
      // Because beta is zero, it is unnecessary to reset input_grad.
for (int g = 0; g < groups; g++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( handle, &alpha, cudnn_output_desc, output_grad_data + output_grad_offset * g, cudnn_filter_desc, filter_data + filter_offset * g, cudnn_conv_desc, data_algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, input_grad_data + input_offset * g)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } if (data_layout == DataLayout::kNHWC) { Tensor input_grad_transpose; Tensor input_grad_nchw; input_grad_nchw.ShareDataWith(*input_grad); input_grad_nchw.Resize(framework::make_ddim(input_vec)); if (strides.size() == 2U) { std::vector<int> axis = {0, 2, 3, 1}; DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose, axis); *input_grad = input_grad_transpose; } else if (strides.size() == 3U) { std::vector<int> axis = {0, 2, 3, 4, 1}; DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose, axis); *input_grad = input_grad_transpose; } } } // ------------------- cudnn conv backward filter --------------------- if (filter_grad) { T* filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset filter_grad. // Gradient with respect to the filter for (int g = 0; g < groups; g++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, cudnn_output_desc, output_grad_data + output_grad_offset * g, cudnn_input_desc, input_data + input_offset * g, cudnn_conv_desc, filter_algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_filter_desc, filter_grad_data + filter_offset * g)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeOpKernel<float>, ops::CUDNNConvTransposeOpKernel<double>); REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeGradOpKernel<float>, ops::CUDNNConvTransposeGradOpKernel<double>); REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeOpKernel<float>, ops::CUDNNConvTransposeOpKernel<double>); REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeGradOpKernel<float>, ops::CUDNNConvTransposeGradOpKernel<double>);
d1e48e5dd53fd915b1b66a49a6a4688d9cc5cd06.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/conv_transpose_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/padding.h" #include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; static constexpr size_t kConvCUDNNWorkspaceLimitBytes = 1024 * 1024 * 1024; template <typename T, int D> static void DataTranspose(const framework::ExecutionContext& ctx, const Tensor* input, Tensor* output, const std::vector<int>& axis, int flag = 0) { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::Transpose<platform::CUDADeviceContext, T, D> transpose; auto in_dims = input->dims(); std::vector<int64_t> input_transpose_vec; for (size_t i = 0; i < axis.size(); ++i) { if (flag == 0) input_transpose_vec.push_back(in_dims[axis[i]]); else input_transpose_vec.push_back(in_dims[i]); } framework::DDim input_transpose_dims( framework::make_ddim(input_transpose_vec)); output->mutable_data<T>(input_transpose_dims, ctx.GetPlace()); transpose(dev_ctx, *input, output, axis); } template <typename T> class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, "It must use CUDAPlace."); auto* input = ctx.Input<Tensor>("Input"); auto* filter = ctx.Input<Tensor>("Filter"); auto* output = ctx.Output<Tensor>("Output"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); // cudnn v5 does not support dilations std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); int user_workspace_size = ctx.Attr<int>("workspace_size_MB"); const T* filter_data = filter->data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_format"); const paddle::operators::DataLayout data_layout = (data_layout_str != "NHWC" ? 
DataLayout::kNCHW : DataLayout::kNHWC); // if channel_last, transpose to channel_first Tensor input_transpose; std::vector<int> input_vec = framework::vectorize<int>(input->dims()); std::vector<int> output_vec = framework::vectorize<int>(output->dims()); if (data_layout == DataLayout::kNHWC) { if (strides.size() == 2U) { std::vector<int> axis = {0, 3, 1, 2}; for (size_t i = 0; i < axis.size(); ++i) { input_vec[i] = input->dims()[axis[i]]; output_vec[i] = output->dims()[axis[i]]; } DataTranspose<T, 4>(ctx, input, &input_transpose, axis); } else if (strides.size() == 3U) { std::vector<int> axis = {0, 4, 1, 2, 3}; for (size_t i = 0; i < axis.size(); ++i) { input_vec[i] = input->dims()[axis[i]]; output_vec[i] = output->dims()[axis[i]]; } DataTranspose<T, 5>(ctx, input, &input_transpose, axis); } } else { input_transpose = *input; } // update padding and dilation auto in_dims = input_transpose.dims(); auto filter_dims = filter->dims(); framework::DDim in_data_dims; in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); std::vector<int> input_pad(input_transpose.dims().size() * 2, 0); Tensor transformed_input; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = input_transpose.dims()[0]; new_input_shape_vec[1] = input_transpose.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = input_transpose.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_input = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); const int rank = input_transpose.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, input_transpose, pad_value, &transformed_input); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, input_transpose, pad_value, &transformed_input); } break; default: PADDLE_ENFORCE_EQ( rank == 4 || rank == 5, true, "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."); } } else { transformed_input = input_transpose; if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } std::vector<int64_t> starts(data_dim, 0); std::vector<int64_t> ends(data_dim, 0); std::vector<int64_t> axes(data_dim, 0); for (size_t i = 0; i < data_dim; ++i) { starts[i] = input_pad[2 * i + 4] * (strides[i] + 1); ends[i] = starts[i] + output_vec[i + 2]; axes[i] = i + 2; } const T* input_data = transformed_input.data<T>(); input_vec = 
framework::vectorize<int>(transformed_input.dims()); std::vector<int> transformed_output_vec = output_vec; for (size_t i = 0; i < data_dim; ++i) { transformed_output_vec[i + 2] = output_vec[i + 2] + (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] - 2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1]; } Tensor transformed_output; if (!is_sys_pad) { DDim transformed_output_shape( framework::make_ddim(transformed_output_vec)); transformed_output.mutable_data<T>(transformed_output_shape, ctx.GetPlace()); } else { output->mutable_data<T>(ctx.GetPlace()); transformed_output.ShareDataWith(*output); transformed_output.Resize(framework::make_ddim(transformed_output_vec)); } T* transformed_output_data = transformed_output.data<T>(); // ------------------- cudnn descriptors --------------------- ScopedTensorDescriptor input_desc; ScopedTensorDescriptor output_desc; ScopedFilterDescriptor filter_desc; ScopedConvolutionDescriptor conv_desc; DataLayout layout; if (strides.size() == 2U) { layout = DataLayout::kNCHW; } else { layout = DataLayout::kNCDHW; } // (N, M, H, W) or (N, M, D, H, W) cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(layout, input_vec, groups); // (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w) cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(layout, transformed_output_vec, groups); // (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w) cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>( layout, framework::vectorize<int>(filter->dims()), groups); cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor<T>(padding_common, strides, dilations); // ------------------- cudnn conv workspace --------------------- size_t workspace_size_in_bytes; // final workspace to allocate. size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes; if (user_workspace_size > 0) { workspace_size_limit = user_workspace_size * 1024 * 1024; } // ------------------- cudnn conv algorithm --------------------- cudnnConvolutionBwdDataAlgo_t algo; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); // Get the algorithm CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, // dxDesc: Handle to the previously initialized output tensor // descriptor. 
cudnn_output_desc, CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &algo)); if (algo == 0 && FLAGS_cudnn_deterministic) { algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(1); } // get workspace size able to allocate CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); // ------------------- cudnn conv transpose forward --------------------- int input_offset = transformed_input.numel() / transformed_input.dims()[0] / groups; int output_offset = transformed_output.numel() / transformed_output.dims()[0] / groups; int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; auto workspace_handle = dev_ctx.cudnn_workspace_handle(); for (int g = 0; g < groups; g++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, cudnn_filter_desc, filter_data + filter_offset * g, cudnn_input_desc, input_data + input_offset * g, cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_output_desc, transformed_output_data + output_offset * g)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } if (!is_sys_pad && strides.size() == 2U) { Slice<paddle::platform::CUDADeviceContext, T, 4>( ctx, &transformed_output, output, starts, ends, axes); } else if (!is_sys_pad && strides.size() == 3U) { Slice<paddle::platform::CUDADeviceContext, T, 5>( ctx, &transformed_output, output, starts, ends, axes); } if (data_layout == DataLayout::kNHWC) { Tensor output_transpose; Tensor output_nchw; output_nchw.ShareDataWith(*output); output_nchw.Resize(framework::make_ddim(output_vec)); if (strides.size() == 2U) { std::vector<int> axis = {0, 2, 3, 1}; DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis); *output = output_transpose; } else if (strides.size() == 3U) { std::vector<int> axis = {0, 2, 3, 4, 1}; DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis); *output = output_transpose; } } } }; template <typename T> class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); auto input = ctx.Input<Tensor>("Input"); auto filter = ctx.Input<Tensor>("Filter"); auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); const T* filter_data = filter->data<T>(); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); // cudnn v5 does not support dilations std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); int user_workspace_size = ctx.Attr<int>("workspace_size_MB"); const std::string data_layout_str = ctx.Attr<std::string>("data_format"); const paddle::operators::DataLayout data_layout = (data_layout_str != "NHWC" ? 
DataLayout::kNCHW : DataLayout::kNHWC); // if channel_last, transpose to channel_first Tensor input_transpose; Tensor output_grad_transpose; std::vector<int> input_vec = framework::vectorize<int>(input->dims()); std::vector<int> output_vec = framework::vectorize<int>(output_grad->dims()); if (data_layout == DataLayout::kNHWC) { if (strides.size() == 2U) { std::vector<int> axis = {0, 3, 1, 2}; for (size_t i = 0; i < axis.size(); ++i) { input_vec[i] = input->dims()[axis[i]]; output_vec[i] = output_grad->dims()[axis[i]]; } DataTranspose<T, 4>(ctx, input, &input_transpose, axis); DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis); } else if (strides.size() == 3U) { std::vector<int> axis = {0, 4, 1, 2, 3}; for (size_t i = 0; i < axis.size(); ++i) { input_vec[i] = input->dims()[axis[i]]; output_vec[i] = output_grad->dims()[axis[i]]; } DataTranspose<T, 5>(ctx, input, &input_transpose, axis); DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis); } } else { input_transpose = *input; output_grad_transpose = *output_grad; } // update padding and dilation auto in_dims = input_transpose.dims(); auto filter_dims = filter->dims(); framework::DDim in_data_dims; in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); std::vector<int> input_pad(input_transpose.dims().size() * 2, 0); Tensor transformed_output_grad; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_output_grad_shape_vec(data_dim + 2); new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0]; new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); new_output_grad_shape_vec[i + 2] = output_grad_transpose.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } framework::DDim new_output_grad_shape( framework::make_ddim(new_output_grad_shape_vec)); transformed_output_grad.Resize(new_output_grad_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_output_grad = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_output_grad_shape, dev_ctx); const int rank = input_transpose.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, output_grad_transpose, pad_value, &transformed_output_grad); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, output_grad_transpose, pad_value, &transformed_output_grad); } break; default: PADDLE_ENFORCE_EQ( rank == 4 || rank == 5, true, "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."); } } else { transformed_output_grad = output_grad_transpose; if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* 
input_data = input_transpose.data<T>(); const T* output_grad_data = transformed_output_grad.data<T>(); output_vec = framework::vectorize<int>(transformed_output_grad.dims()); // ------------------- cudnn descriptors --------------------- ScopedTensorDescriptor input_desc; ScopedTensorDescriptor output_desc; ScopedFilterDescriptor filter_desc; ScopedConvolutionDescriptor conv_desc; DataLayout layout; if (strides.size() == 2U) { layout = DataLayout::kNCHW; } else { layout = DataLayout::kNCDHW; } // Input: (N, M, H, W) or (N, M, D, H, W) cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(layout, input_vec, groups); // Output: (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w) cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(layout, output_vec, groups); // Filter (M, C, K_h, K_w) or (M, C, K_d K_h, K_w) cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>( layout, framework::vectorize<int>(filter->dims()), groups); cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor<T>(padding_common, strides, dilations); // ------------------- cudnn backward algorithm --------------------- cudnnConvolutionFwdAlgo_t data_algo; cudnnConvolutionBwdFilterAlgo_t filter_algo; size_t bwd_filter_ws_size, fwd_ws_size; size_t workspace_size_in_bytes = 0; size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes; if (user_workspace_size > 0) { workspace_size_limit = user_workspace_size * 1024 * 1024; } auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); if (input_grad) { // choose backward algorithm for data CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_input_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &data_algo)); CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_input_desc, data_algo, &fwd_ws_size)); workspace_size_in_bytes = std::max(workspace_size_in_bytes, fwd_ws_size); } if (filter_grad) { // choose backward algorithm for filter CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_filter_desc, CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &filter_algo)); // get workspace for backwards filter algorithm CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize( handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_filter_desc, filter_algo, &bwd_filter_ws_size)); workspace_size_in_bytes = std::max(workspace_size_in_bytes, bwd_filter_ws_size); } // ------------------- cudnn conv backward data --------------------- // FIXME(typhoonzero): template type T may not be the same as cudnn call. int input_offset = input->numel() / input->dims()[0] / groups; int output_grad_offset = transformed_output_grad.numel() / transformed_output_grad.dims()[0] / groups; int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; auto workspace_handle = dev_ctx.cudnn_workspace_handle(); if (input_grad) { T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. 
for (int g = 0; g < groups; g++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( handle, &alpha, cudnn_output_desc, output_grad_data + output_grad_offset * g, cudnn_filter_desc, filter_data + filter_offset * g, cudnn_conv_desc, data_algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, input_grad_data + input_offset * g)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } if (data_layout == DataLayout::kNHWC) { Tensor input_grad_transpose; Tensor input_grad_nchw; input_grad_nchw.ShareDataWith(*input_grad); input_grad_nchw.Resize(framework::make_ddim(input_vec)); if (strides.size() == 2U) { std::vector<int> axis = {0, 2, 3, 1}; DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose, axis); *input_grad = input_grad_transpose; } else if (strides.size() == 3U) { std::vector<int> axis = {0, 2, 3, 4, 1}; DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose, axis); *input_grad = input_grad_transpose; } } } // ------------------- cudnn conv backward filter --------------------- if (filter_grad) { T* filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset filter_grad. // Gradient with respect to the filter for (int g = 0; g < groups; g++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, cudnn_output_desc, output_grad_data + output_grad_offset * g, cudnn_input_desc, input_data + input_offset * g, cudnn_conv_desc, filter_algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_filter_desc, filter_grad_data + filter_offset * g)); }; workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeOpKernel<float>, ops::CUDNNConvTransposeOpKernel<double>); REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeGradOpKernel<float>, ops::CUDNNConvTransposeGradOpKernel<double>); REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeOpKernel<float>, ops::CUDNNConvTransposeOpKernel<double>); REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, ops::CUDNNConvTransposeGradOpKernel<float>, ops::CUDNNConvTransposeGradOpKernel<double>);
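Both conv-transpose kernels above handle asymmetric padding by splitting it into a symmetric part that the cuDNN convolution descriptor can express and a remainder that is physically padded into the tensor (and later sliced off). A minimal host-side sketch of that split; SplitPadding is a hypothetical helper, not a Paddle API:

#include <algorithm>
#include <vector>

// For each spatial dim with (before, after) padding, return the part cuDNN
// can express (common) and the extra amount that must be materialized into
// the tensor on each side. One of extra_before/extra_after is always 0.
void SplitPadding(const std::vector<int>& paddings,  // [b0, a0, b1, a1, ...]
                  std::vector<int>* common,
                  std::vector<int>* extra_before,
                  std::vector<int>* extra_after) {
  int dims = static_cast<int>(paddings.size()) / 2;
  for (int i = 0; i < dims; ++i) {
    int b = paddings[2 * i], a = paddings[2 * i + 1];
    int c = std::min(b, a);          // symmetric part kept in the descriptor
    common->push_back(c);
    extra_before->push_back(b - c);  // explicit padding on the "before" side
    extra_after->push_back(a - c);   // explicit padding on the "after" side
  }
}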
f75cca7a358657f6d535529b5358de91e8143d6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/filler_op.h" #include "caffe2/operators/operator_fallback_gpu.h" namespace caffe2 { namespace { __global__ void FillRangeKernel(const int n, float* data) { CUDA_1D_KERNEL_LOOP(index, n) { data[index] = index; } } template <typename T> __global__ void FillDiagonalKernel( const int num_diagonal_elements, const TIndex step_size, const T value, T* data) { CUDA_1D_KERNEL_LOOP(index, num_diagonal_elements) { data[index * step_size] = value; } } } template <> bool RangeFillOp<float, CUDAContext>::Fill(TensorCUDA* output) { int N = output->size(); hipLaunchKernelGGL(( FillRangeKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, output->mutable_data<float>()); return true; } template <> template <typename T> bool DiagonalFillOp<CUDAContext>::FillWithType(TensorCUDA* output) { VerifyOutputShape(output); auto* data = output->template mutable_data<T>(); int size = output->size(); // first fill everything with 0 math::Set<T, CUDAContext>(size, T(0), data, &context_); T value = OperatorBase::GetSingleArgument<T>("value", 0); TIndex step_size = GetStepSize(output); int num_diagonal_elements = ceil((float)size / step_size); hipLaunchKernelGGL(( FillDiagonalKernel), dim3(CAFFE_GET_BLOCKS(num_diagonal_elements)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_diagonal_elements, step_size, value, data); return true; } REGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp<int, CUDAContext>); REGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp<CUDAContext>); REGISTER_CUDA_OPERATOR(DiagonalFill, DiagonalFillOp<CUDAContext>); REGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( LengthsRangeFill, GPUFallbackOp<LengthsRangeFillOp<CPUContext>>); } // namespace caffe2
f75cca7a358657f6d535529b5358de91e8143d6b.cu
#include <cmath> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/filler_op.h" #include "caffe2/operators/operator_fallback_gpu.h" namespace caffe2 { namespace { __global__ void FillRangeKernel(const int n, float* data) { CUDA_1D_KERNEL_LOOP(index, n) { data[index] = index; } } template <typename T> __global__ void FillDiagonalKernel( const int num_diagonal_elements, const TIndex step_size, const T value, T* data) { CUDA_1D_KERNEL_LOOP(index, num_diagonal_elements) { data[index * step_size] = value; } } } template <> bool RangeFillOp<float, CUDAContext>::Fill(TensorCUDA* output) { int N = output->size(); FillRangeKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, output->mutable_data<float>()); return true; } template <> template <typename T> bool DiagonalFillOp<CUDAContext>::FillWithType(TensorCUDA* output) { VerifyOutputShape(output); auto* data = output->template mutable_data<T>(); int size = output->size(); // first fill everything with 0 math::Set<T, CUDAContext>(size, T(0), data, &context_); T value = OperatorBase::GetSingleArgument<T>("value", 0); TIndex step_size = GetStepSize(output); int num_diagonal_elements = ceil((float)size / step_size); FillDiagonalKernel<<< CAFFE_GET_BLOCKS(num_diagonal_elements), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(num_diagonal_elements, step_size, value, data); return true; } REGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp<int, CUDAContext>); REGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp<CUDAContext>); REGISTER_CUDA_OPERATOR(DiagonalFill, DiagonalFillOp<CUDAContext>); REGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( LengthsRangeFill, GPUFallbackOp<LengthsRangeFillOp<CPUContext>>); } // namespace caffe2
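The filler kernels rely on Caffe2's CUDA_1D_KERNEL_LOOP and CAFFE_GET_BLOCKS macros, which pair a grid-stride loop with a capped block count. A standalone equivalent is sketched below; the thread and block-cap constants are assumptions standing in for the real values in caffe2/core/common_gpu.h:

#include <algorithm>

constexpr int kThreads = 128;     // stand-in for CAFFE_CUDA_NUM_THREADS
constexpr int kMaxBlocks = 4096;  // stand-in for the maximum-blocks cap

inline int GetBlocks(int n) {
  // Enough blocks to cover n once, but never more than the cap; the
  // grid-stride loop inside the kernel picks up any remainder.
  return std::min((n + kThreads - 1) / kThreads, kMaxBlocks);
}

__global__ void FillRangeSketch(int n, float* data) {
  // Same effect as CUDA_1D_KERNEL_LOOP(index, n) { data[index] = index; }
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    data[i] = static_cast<float>(i);
  }
}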
191cbbcc7f25ddd5e4ea40f2e8577a53c28263a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <hipcub/hipcub.hpp> #include <raft/cuda_utils.cuh> #include <raft/mr/device/allocator.hpp> #include <random/make_blobs.cuh> #include "test_utils.h" namespace MLCommon { namespace Random { template <typename T> __global__ void meanKernel(T* out, int* lens, const T* data, const int* labels, int nrows, int ncols, int nclusters, bool row_major) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int rowid = row_major ? tid / ncols : tid % nrows; int colid = row_major ? tid % ncols : tid / nrows; if (rowid < nrows && colid < ncols) { T val = data[tid]; int label = labels[rowid]; int idx = row_major ? label * ncols + colid : colid * nclusters + label; raft::myAtomicAdd(out + idx * 2, val); raft::myAtomicAdd(out + idx * 2 + 1, val * val); if (colid == 0) { raft::myAtomicAdd(lens + label, 1); } } } template <typename T> __global__ void compute_mean_var(T* out, const T* stats, int* lens, int nrows, int ncols, bool row_major) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int rowid = row_major ? tid / ncols : tid % nrows; int colid = row_major ? tid % ncols : tid / nrows; int stride = nrows * ncols; if (rowid < nrows && colid < ncols) { int len = lens[rowid]; auto mean = stats[tid * 2] / len; out[tid] = mean; out[tid + stride] = (stats[tid * 2 + 1] / len) - (mean * mean); } } template <typename T> struct MakeBlobsInputs { T tolerance; int rows, cols, n_clusters; T std; bool row_major, shuffle; raft::random::GeneratorType gtype; uint64_t seed; }; template <typename T> class MakeBlobsTest : public ::testing::TestWithParam<MakeBlobsInputs<T>> { protected: void SetUp() override { // Tests are configured with their expected test-values sigma. For example, // 4 x sigma indicates the test shouldn't fail 99.9% of the time. 
num_sigma = 50; allocator.reset(new raft::mr::device::default_allocator); params = ::testing::TestWithParam<MakeBlobsInputs<T>>::GetParam(); int len = params.rows * params.cols; CUDA_CHECK(hipStreamCreate(&stream)); raft::random::Rng r(params.seed, params.gtype); raft::allocate(data, len); raft::allocate(labels, params.rows); raft::allocate(stats, 2 * params.n_clusters * params.cols, true); raft::allocate(mean_var, 2 * params.n_clusters * params.cols, true); raft::allocate(mu_vec, params.cols * params.n_clusters); raft::allocate(lens, params.n_clusters, true); r.uniform(mu_vec, params.cols * params.n_clusters, T(-10.0), T(10.0), stream); T* sigma_vec = nullptr; make_blobs(data, labels, params.rows, params.cols, params.n_clusters, allocator, stream, params.row_major, mu_vec, sigma_vec, params.std, params.shuffle, T(-10.0), T(10.0), params.seed, params.gtype); static const int threads = 128; hipLaunchKernelGGL(( meanKernel<T>), dim3(raft::ceildiv(len, threads)), dim3(threads), 0, stream, stats, lens, data, labels, params.rows, params.cols, params.n_clusters, params.row_major); int len1 = params.n_clusters * params.cols; hipLaunchKernelGGL(( compute_mean_var<T>), dim3(raft::ceildiv(len1, threads)), dim3(threads), 0, stream, mean_var, stats, lens, params.n_clusters, params.cols, params.row_major); } void TearDown() override { CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(stats)); CUDA_CHECK(hipFree(mu_vec)); } void check() { int len = params.n_clusters * params.cols; auto compare = raft::CompareApprox<T>(num_sigma * params.tolerance); ASSERT_TRUE(raft::devArrMatch(mu_vec, mean_var, len, compare)); ASSERT_TRUE(raft::devArrMatch(params.std, mean_var + len, len, compare)); } protected: hipStream_t stream; MakeBlobsInputs<T> params; int *labels, *lens; T *data, *stats, *mu_vec, *mean_var; std::shared_ptr<raft::mr::device::allocator> allocator; int num_sigma; }; typedef MakeBlobsTest<float> MakeBlobsTestF; const std::vector<MakeBlobsInputs<float>> inputsf_t = { {0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenPhilox, 
1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, }; TEST_P(MakeBlobsTestF, Result) { check(); } INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestF, ::testing::ValuesIn(inputsf_t)); typedef MakeBlobsTest<double> MakeBlobsTestD; const std::vector<MakeBlobsInputs<double>> inputsd_t = { {0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenKiss99, 1234ULL}, 
{0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, }; TEST_P(MakeBlobsTestD, Result) { check(); } INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestD, ::testing::ValuesIn(inputsd_t)); } // end namespace Random } // end namespace MLCommon
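The meanKernel/compute_mean_var pair in the test above (repeated in its CUDA twin below) recovers each cluster's statistics from running sums: mean = S1/n and variance = S2/n - mean^2. A minimal host-side reference of the same arithmetic, handy for sanity-checking the device result; the row-major layout and all names here are assumptions of this sketch, not part of the test:

#include <cstddef>
#include <vector>

// Reference for the device reduction above: stats holds interleaved
// (S1, S2) = (sum x, sum x^2) per (cluster, column); out receives means in
// the first n_clusters*n_cols slots and variances in the next, matching the
// [mu | var] layout that compute_mean_var writes.
void mean_var_reference(const std::vector<float>& stats,
                        const std::vector<int>& lens, int n_clusters,
                        int n_cols, std::vector<float>& out) {
  const std::size_t stride = static_cast<std::size_t>(n_clusters) * n_cols;
  out.assign(2 * stride, 0.f);
  for (int cl = 0; cl < n_clusters; ++cl) {
    for (int j = 0; j < n_cols; ++j) {
      const std::size_t i = static_cast<std::size_t>(cl) * n_cols + j;
      const float n = static_cast<float>(lens[cl]);
      const float mean = stats[2 * i] / n;
      out[i] = mean;
      out[i + stride] = stats[2 * i + 1] / n - mean * mean;  // E[x^2] - E[x]^2
    }
  }
}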
191cbbcc7f25ddd5e4ea40f2e8577a53c28263a9.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <cub/cub.cuh> #include <raft/cuda_utils.cuh> #include <raft/mr/device/allocator.hpp> #include <random/make_blobs.cuh> #include "test_utils.h" namespace MLCommon { namespace Random { template <typename T> __global__ void meanKernel(T* out, int* lens, const T* data, const int* labels, int nrows, int ncols, int nclusters, bool row_major) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int rowid = row_major ? tid / ncols : tid % nrows; int colid = row_major ? tid % ncols : tid / nrows; if (rowid < nrows && colid < ncols) { T val = data[tid]; int label = labels[rowid]; int idx = row_major ? label * ncols + colid : colid * nclusters + label; raft::myAtomicAdd(out + idx * 2, val); raft::myAtomicAdd(out + idx * 2 + 1, val * val); if (colid == 0) { raft::myAtomicAdd(lens + label, 1); } } } template <typename T> __global__ void compute_mean_var(T* out, const T* stats, int* lens, int nrows, int ncols, bool row_major) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int rowid = row_major ? tid / ncols : tid % nrows; int colid = row_major ? tid % ncols : tid / nrows; int stride = nrows * ncols; if (rowid < nrows && colid < ncols) { int len = lens[rowid]; auto mean = stats[tid * 2] / len; out[tid] = mean; out[tid + stride] = (stats[tid * 2 + 1] / len) - (mean * mean); } } template <typename T> struct MakeBlobsInputs { T tolerance; int rows, cols, n_clusters; T std; bool row_major, shuffle; raft::random::GeneratorType gtype; uint64_t seed; }; template <typename T> class MakeBlobsTest : public ::testing::TestWithParam<MakeBlobsInputs<T>> { protected: void SetUp() override { // Tests are configured with their expected test-values sigma. For example, // 4 x sigma indicates the test shouldn't fail 99.9% of the time. 
num_sigma = 50; allocator.reset(new raft::mr::device::default_allocator); params = ::testing::TestWithParam<MakeBlobsInputs<T>>::GetParam(); int len = params.rows * params.cols; CUDA_CHECK(cudaStreamCreate(&stream)); raft::random::Rng r(params.seed, params.gtype); raft::allocate(data, len); raft::allocate(labels, params.rows); raft::allocate(stats, 2 * params.n_clusters * params.cols, true); raft::allocate(mean_var, 2 * params.n_clusters * params.cols, true); raft::allocate(mu_vec, params.cols * params.n_clusters); raft::allocate(lens, params.n_clusters, true); r.uniform(mu_vec, params.cols * params.n_clusters, T(-10.0), T(10.0), stream); T* sigma_vec = nullptr; make_blobs(data, labels, params.rows, params.cols, params.n_clusters, allocator, stream, params.row_major, mu_vec, sigma_vec, params.std, params.shuffle, T(-10.0), T(10.0), params.seed, params.gtype); static const int threads = 128; meanKernel<T><<<raft::ceildiv(len, threads), threads, 0, stream>>>( stats, lens, data, labels, params.rows, params.cols, params.n_clusters, params.row_major); int len1 = params.n_clusters * params.cols; compute_mean_var<T><<<raft::ceildiv(len1, threads), threads, 0, stream>>>( mean_var, stats, lens, params.n_clusters, params.cols, params.row_major); } void TearDown() override { CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(stats)); CUDA_CHECK(cudaFree(mu_vec)); } void check() { int len = params.n_clusters * params.cols; auto compare = raft::CompareApprox<T>(num_sigma * params.tolerance); ASSERT_TRUE(raft::devArrMatch(mu_vec, mean_var, len, compare)); ASSERT_TRUE(raft::devArrMatch(params.std, mean_var + len, len, compare)); } protected: cudaStream_t stream; MakeBlobsInputs<T> params; int *labels, *lens; T *data, *stats, *mu_vec, *mean_var; std::shared_ptr<raft::mr::device::allocator> allocator; int num_sigma; }; typedef MakeBlobsTest<float> MakeBlobsTestF; const std::vector<MakeBlobsInputs<float>> inputsf_t = { {0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, true, 
raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL}, }; TEST_P(MakeBlobsTestF, Result) { check(); } INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestF, ::testing::ValuesIn(inputsf_t)); typedef MakeBlobsTest<double> MakeBlobsTestD; const std::vector<MakeBlobsInputs<double>> inputsd_t = { {0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.0, true, true, 
raft::random::GenKiss99, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL}, {0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, {0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL}, }; TEST_P(MakeBlobsTestD, Result) { check(); } INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestD, ::testing::ValuesIn(inputsd_t)); } // end namespace Random } // end namespace MLCommon
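The two make_blobs test files above differ only in runtime spelling, which makes the hipify rewrite easy to read off. An illustration with a throwaway kernel (everything here is hypothetical, shown only to isolate the launch mapping):

#include <cuda_runtime.h>

__global__ void scale(float* x, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= s;
}

void launch_scale(float* d_x, int n, cudaStream_t stream) {
  int threads = 128;
  int blocks = (n + threads - 1) / threads;
  // CUDA spelling, as in the .cu file above:
  scale<<<blocks, threads, 0, stream>>>(d_x, 2.f, n);
  // hipify rewrites the same launch for the .hip file as:
  //   hipLaunchKernelGGL(( scale), dim3(blocks), dim3(threads), 0, stream,
  //                      d_x, 2.f, n);
  // alongside the 1:1 renames visible above: cudaStream_t -> hipStream_t,
  // cudaStreamCreate -> hipStreamCreate, cudaFree -> hipFree.
}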
d1d20a9ddf7fb894960e0c94f505d4e7420d56d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag> __global__ void LookupTable(T *output, const T *table, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const int64_t padding_idx) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); T *out = output + idy * D; const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { if (PaddingFlag) { if (id == padding_idx) out[i] = static_cast<T>(0); else out[i] = tab[i]; } else { out[i] = tab[i]; } } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. 
Please check input value.", N, id); const T *out = output + idy * D; T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { paddle::platform::CudaAtomicAdd(&tab[i], out[i]); } idy += BlockDimY * GridDimX; } } template <typename T> class LookupTableCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<LoDTensor>("W"); auto *ids_t = context.Input<LoDTensor>("Ids"); auto *output_t = context.Output<LoDTensor>("Out"); int64_t padding_idx = context.Attr<int64_t>("padding_idx"); auto id_name = context.InputNames("Ids").front(); auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); auto *ids = ids_t->data<int64_t>(); auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); dim3 threads(128, 8); dim3 grids(8, 1); if (padding_idx == -1) hipLaunchKernelGGL(( LookupTable< T, 128, 8, 8, false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); else hipLaunchKernelGGL(( LookupTable< T, 128, 8, 8, true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); } }; template <typename T> class LookupTableGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); bool is_sparse = context.Attr<bool>("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. if (is_sparse) { auto *ids = context.Input<LoDTensor>("Ids"); auto *table = context.Input<LoDTensor>("W"); auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out")); auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W")); auto *ids_data = ids->data<int64_t>(); int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory framework::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace()); // TODO(yuyang18): Strange code here. memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data<T>(context.GetPlace()); auto *d_table_data = d_table_value->data<T>(); auto *d_output_data = d_output->data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. 
" "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } else { auto ids_t = context.Input<LoDTensor>("Ids"); auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t *ids = ids_t->data<int64_t>(); const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); dim3 threads(128, 8); dim3 grids(8, 1); hipLaunchKernelGGL(( LookupTableGrad<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids, N, K, D); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>, ops::LookupTableCUDAKernel<double>, ops::LookupTableCUDAKernel<plat::float16>, ops::LookupTableCUDAKernel<int8_t>); REGISTER_OP_CUDA_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel<float>, ops::LookupTableGradCUDAKernel<double>, ops::LookupTableGradCUDAKernel<plat::float16>);
d1d20a9ddf7fb894960e0c94f505d4e7420d56d5.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag> __global__ void LookupTable(T *output, const T *table, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const int64_t padding_idx) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); T *out = output + idy * D; const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { if (PaddingFlag) { if (id == padding_idx) out[i] = static_cast<T>(0); else out[i] = tab[i]; } else { out[i] = tab[i]; } } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. 
Please check input value.", N, id); const T *out = output + idy * D; T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { paddle::platform::CudaAtomicAdd(&tab[i], out[i]); } idy += BlockDimY * GridDimX; } } template <typename T> class LookupTableCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<LoDTensor>("W"); auto *ids_t = context.Input<LoDTensor>("Ids"); auto *output_t = context.Output<LoDTensor>("Out"); int64_t padding_idx = context.Attr<int64_t>("padding_idx"); auto id_name = context.InputNames("Ids").front(); auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); auto *ids = ids_t->data<int64_t>(); auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); dim3 threads(128, 8); dim3 grids(8, 1); if (padding_idx == -1) LookupTable< T, 128, 8, 8, false><<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); else LookupTable< T, 128, 8, 8, true><<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); } }; template <typename T> class LookupTableGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); bool is_sparse = context.Attr<bool>("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. if (is_sparse) { auto *ids = context.Input<LoDTensor>("Ids"); auto *table = context.Input<LoDTensor>("W"); auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out")); auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W")); auto *ids_data = ids->data<int64_t>(); int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory framework::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace()); // TODO(yuyang18): Strange code here. memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data<T>(context.GetPlace()); auto *d_table_data = d_table_value->data<T>(); auto *d_output_data = d_output->data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. 
" "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } else { auto ids_t = context.Input<LoDTensor>("Ids"); auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t *ids = ids_t->data<int64_t>(); const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); dim3 threads(128, 8); dim3 grids(8, 1); LookupTableGrad<T, 128, 8, 8><<<grids, threads, 0, dev_ctx.stream()>>>( d_table, d_output, ids, N, K, D); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>, ops::LookupTableCUDAKernel<double>, ops::LookupTableCUDAKernel<plat::float16>, ops::LookupTableCUDAKernel<int8_t>); REGISTER_OP_CUDA_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel<float>, ops::LookupTableGradCUDAKernel<double>, ops::LookupTableGradCUDAKernel<plat::float16>);
dad8622c8b430cb89bec56801e589f6d5c074c8f.hip
// !!! This is a file automatically generated by hipify!!!
#define GL_GLEXT_PROTOTYPES
#include "GL/glut.h"
#include "hip/hip_runtime.h"
#include "cuda_gl_interop.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"

#define DIM 512

GLuint bufferObj;
cudaGraphicsResource *resource;

// based on ripple code, but uses uchar4, which is the
// type of data graphic interop uses
__global__ void kernel( uchar4 *ptr ) {
    // map from threadIdx/blockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    // now calculate the value at that position
    float fx = x / (float)DIM - 0.5f;
    float fy = y / (float)DIM - 0.5f;
    unsigned char green = 128 + 127 * sin( abs(fx * 100) - abs(fy * 100) );

    // accessing uchar4 vs. unsigned char*
    ptr[offset].x = 0;
    ptr[offset].y = green;
    ptr[offset].z = 0;
    ptr[offset].w = 255;
}

static void draw_func( void ) {
    glDrawPixels( DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0 );
    glutSwapBuffers();
}

static void key_func( unsigned char key, int x, int y ) {
    switch (key) {
        case 27:
            // clean up OpenGL and CUDA
            HANDLE_ERROR( hipGraphicsUnregisterResource( resource ) );
            glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, 0 );
            glDeleteBuffers( 1, &bufferObj );
            exit( 0 );
    }
}

int main( int argc, char **argv ) {
    hipDeviceProp_t prop;
    int dev;

    memset( &prop, 0, sizeof( hipDeviceProp_t ) );
    prop.major = 1;
    prop.minor = 0;
    HANDLE_ERROR( hipChooseDevice( &dev, &prop ) );
    HANDLE_ERROR( hipGLSetGLDevice( dev ) );

    // these GLUT calls need to be made before the other GL calls
    glutInit( &argc, argv );
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
    glutInitWindowSize( DIM, DIM );
    glutCreateWindow( "bitmap" );

    glGenBuffers( 1, &bufferObj );
    glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj );
    glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4, NULL,
                  GL_DYNAMIC_DRAW_ARB );

    HANDLE_ERROR( hipGraphicsGLRegisterBuffer( &resource, bufferObj,
                                               hipGraphicsMapFlagsNone ) );

    uchar4* devPtr;
    size_t size;
    HANDLE_ERROR( hipGraphicsMapResources( 1, &resource, NULL ) );
    HANDLE_ERROR( hipGraphicsResourceGetMappedPointer( (void**)&devPtr, &size,
                                                       resource ) );

    dim3 grids( DIM / 16, DIM / 16 );
    dim3 threads( 16, 16 );
    hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, devPtr );
    HANDLE_ERROR( hipGraphicsUnmapResources( 1, &resource, NULL ) );

    // set up GLUT and kick off main loop
    glutKeyboardFunc( key_func );
    glutDisplayFunc( draw_func );
    glutMainLoop();
}
dad8622c8b430cb89bec56801e589f6d5c074c8f.cu
#define GL_GLEXT_PROTOTYPES
#include "GL/glut.h"
#include "cuda.h"
#include "cuda_gl_interop.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"

#define DIM 512

GLuint bufferObj;
cudaGraphicsResource *resource;

// based on ripple code, but uses uchar4, which is the
// type of data graphic interop uses
__global__ void kernel( uchar4 *ptr ) {
    // map from threadIdx/blockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    // now calculate the value at that position
    float fx = x / (float)DIM - 0.5f;
    float fy = y / (float)DIM - 0.5f;
    unsigned char green = 128 + 127 * sin( abs(fx * 100) - abs(fy * 100) );

    // accessing uchar4 vs. unsigned char*
    ptr[offset].x = 0;
    ptr[offset].y = green;
    ptr[offset].z = 0;
    ptr[offset].w = 255;
}

static void draw_func( void ) {
    glDrawPixels( DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0 );
    glutSwapBuffers();
}

static void key_func( unsigned char key, int x, int y ) {
    switch (key) {
        case 27:
            // clean up OpenGL and CUDA
            HANDLE_ERROR( cudaGraphicsUnregisterResource( resource ) );
            glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, 0 );
            glDeleteBuffers( 1, &bufferObj );
            exit( 0 );
    }
}

int main( int argc, char **argv ) {
    cudaDeviceProp prop;
    int dev;

    memset( &prop, 0, sizeof( cudaDeviceProp ) );
    prop.major = 1;
    prop.minor = 0;
    HANDLE_ERROR( cudaChooseDevice( &dev, &prop ) );
    HANDLE_ERROR( cudaGLSetGLDevice( dev ) );

    // these GLUT calls need to be made before the other GL calls
    glutInit( &argc, argv );
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
    glutInitWindowSize( DIM, DIM );
    glutCreateWindow( "bitmap" );

    glGenBuffers( 1, &bufferObj );
    glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj );
    glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4, NULL,
                  GL_DYNAMIC_DRAW_ARB );

    HANDLE_ERROR( cudaGraphicsGLRegisterBuffer( &resource, bufferObj,
                                                cudaGraphicsMapFlagsNone ) );

    uchar4* devPtr;
    size_t size;
    HANDLE_ERROR( cudaGraphicsMapResources( 1, &resource, NULL ) );
    HANDLE_ERROR( cudaGraphicsResourceGetMappedPointer( (void**)&devPtr, &size,
                                                        resource ) );

    dim3 grids( DIM / 16, DIM / 16 );
    dim3 threads( 16, 16 );
    kernel<<<grids, threads>>>( devPtr );
    HANDLE_ERROR( cudaGraphicsUnmapResources( 1, &resource, NULL ) );

    // set up GLUT and kick off main loop
    glutKeyboardFunc( key_func );
    glutDisplayFunc( draw_func );
    glutMainLoop();
}
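Both interop files above follow one buffer lifecycle: create a GL pixel buffer, register it with the runtime, map it to obtain a device pointer, write with a kernel, and unmap before GL draws from it. Condensed here in CUDA spelling, reusing the globals (bufferObj, resource, DIM, kernel) declared in the files above, with error handling elided:

// 1. GL side: allocate a pixel buffer object big enough for the image.
glGenBuffers(1, &bufferObj);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4, NULL,
             GL_DYNAMIC_DRAW_ARB);

// 2. Register the buffer once, then map it to get a device pointer.
cudaGraphicsGLRegisterBuffer(&resource, bufferObj, cudaGraphicsMapFlagsNone);
cudaGraphicsMapResources(1, &resource, NULL);
uchar4* devPtr;
size_t size;
cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource);

// 3. Fill the pixels on the device, then hand the buffer back to GL; after
//    the unmap, glDrawPixels sources directly from the still-bound PBO.
kernel<<<dim3(DIM / 16, DIM / 16), dim3(16, 16)>>>(devPtr);
cudaGraphicsUnmapResources(1, &resource, NULL);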
3a245377e82ba3889da34cdfe2dc5f6e70bb4400.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "open3d/ml/contrib/PointSampling.cuh"

namespace open3d {
namespace ml {
namespace contrib {

__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
    // points: (B, C, N)
    // idx: (B, M)
    // output:
    //      out: (B, C, M)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;

    out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    points += bs_idx * c * n + c_idx * n;
    out[0] = points[idx[0]];
}

__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
    // grad_out: (B, C, M)
    // idx: (B, M)
    // output:
    //      grad_points: (B, C, N)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;

    grad_out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    grad_points += bs_idx * c * n + c_idx * n;

    atomicAdd(grad_points + idx[0], grad_out[0]);
}

}  // namespace contrib
}  // namespace ml
}  // namespace open3d
3a245377e82ba3889da34cdfe2dc5f6e70bb4400.cu
#include "open3d/ml/contrib/PointSampling.cuh"

namespace open3d {
namespace ml {
namespace contrib {

__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
    // points: (B, C, N)
    // idx: (B, M)
    // output:
    //      out: (B, C, M)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;

    out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    points += bs_idx * c * n + c_idx * n;
    out[0] = points[idx[0]];
}

__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
    // grad_out: (B, C, M)
    // idx: (B, M)
    // output:
    //      grad_points: (B, C, N)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;

    grad_out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    grad_points += bs_idx * c * n + c_idx * n;

    atomicAdd(grad_points + idx[0], grad_out[0]);
}

}  // namespace contrib
}  // namespace ml
}  // namespace open3d
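gather_points_kernel maps the (batch, channel, point) triple directly onto (blockIdx.z, blockIdx.y, flattened x). The file contains only the kernels, so a host launcher has to size the grid accordingly; a hypothetical sketch of one, assuming the kernels above are in scope (the real Open3D launcher is not shown here):

#include <cuda_runtime.h>

// Hypothetical launcher: x covers the m output points, y the c channels,
// z the b batches -- matching the index math inside gather_points_kernel.
void launch_gather_points(int b, int c, int n, int m, const float* points,
                          const int* idx, float* out, cudaStream_t stream) {
  const int threads = 256;
  dim3 block(threads);
  dim3 grid((m + threads - 1) / threads, c, b);
  open3d::ml::contrib::gather_points_kernel<<<grid, block, 0, stream>>>(
      b, c, n, m, points, idx, out);
}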
6a8df42aedc82fb367fa030e16ec56fbcbcd637e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/row_conv_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; using framework::Tensor; namespace { inline int DivUp(int x, int y) { return (x + y - 1) / y; } // Forward prop (shared memory version, for small future_context) template <typename T> __global__ void RowConvForwardSharedMemory(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (d < input_dim) ? sw[w * blx + thx] * in[(start + k + w) * input_dim + d] : static_cast<T>(0); } if (d < input_dim) { out[(start + k) * input_dim + d] = sum; } } } } // Forward prop (naive version) template <typename T> __global__ void RowConvForward(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (wt[w * input_dim + d] * in[(start + k + w) * input_dim + d]); } out[(start + k) * input_dim + d] = sum; } } } // Compute input gradient (shared memory version, for small future_context) template <typename T> __global__ void RowConvGradInputSharedMemory(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? 
wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (d < input_dim) ? (sw[w * blx + thx] * dout[(k + start - w) * input_dim + d]) : static_cast<T>(0); } if (d < input_dim) { din[(k + start) * input_dim + d] = sum; } } } } // Compute input gradient (Naive version) template <typename T> __global__ void RowConvGradInput(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (wt[w * input_dim + d] * dout[(k + start - w) * input_dim + d]); } din[(k + start) * input_dim + d] = sum; } } } // Compute W gradient (small future_context version) template <typename T> __global__ void RowConvGradFilterImproved(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; int xdim_sh_in = block_y; int xdim_sh_dout = block_y; // int xdim_sh_dfilter = future_context; int ydim_sh_in = block_x; int ydim_sh_dout = block_x + future_context - 1; int ydim_sh_dfilter = block_y; T *sh_in = mem; T *sh_dout = &mem[xdim_sh_in * ydim_sh_in]; T *sh_dfilter = &mem[xdim_sh_in * ydim_sh_in + xdim_sh_dout * ydim_sh_dout]; if (thy < future_context) { sh_dfilter[thy * ydim_sh_dfilter + thx] = static_cast<T>(0); } __syncthreads(); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * ydim_sh_in + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : T(0); sh_dout[thx * ydim_sh_dout + thy + future_context - 1] = (d < input_dim && pos < end) ? dout[pos * input_dim + d] : T(0); __syncthreads(); if (thy < future_context - 1) { int pos_offset = pos - future_context + 1; sh_dout[thx * ydim_sh_dout + thy] = (d < input_dim && pos_offset >= start) ? dout[pos_offset * input_dim + d] : T(0); } __syncthreads(); for (int w = 0; w < future_context; w++) { T val = sh_in[thy * ydim_sh_in + thx] * sh_dout[thy * ydim_sh_dout + thx + future_context - 1 - w]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. 
val += platform::__shfl_down_sync(0, val, offset); } __syncthreads(); if (thx == 0) { sh_dfilter[w * ydim_sh_dfilter + thy] += val; } __syncthreads(); } } } for (int w = thy; (w < future_context) && (d < input_dim); w += bly) { dfilter[w * input_dim + d] += sh_dfilter[w * ydim_sh_dfilter + thx]; } } // Compute weight(filter) gradient template <typename T> __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; T *sh_in = mem; T *sh_dout = &mem[block_x * block_y]; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * block_y + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : 0.0; __syncthreads(); for (int w = 0; w < future_context; w++) { sh_dout[thx * block_y + thy] = (d < input_dim && (k - w) >= 0 && (k - w) < current_timesteps) ? dout[(pos - w) * input_dim + d] : 0.0; __syncthreads(); T val = sh_in[thy * block_y + thx] * sh_dout[thy * block_y + thx]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. val += platform::__shfl_down_sync(0, val, offset); } __syncthreads(); if (thx == 0 && (gx + thy) < input_dim) { dfilter[w * input_dim + gx + thy] += val; } } } } } } // namespace template <typename T> class RowConvKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<Tensor>("Filter"); auto *Out = context.Output<LoDTensor>("Out"); const T *in = X->data<T>(); const T *weight = Filter->data<T>(); T *out = Out->mutable_data<T>(context.GetPlace()); auto batch_indices = X->lod()[0]; int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; size_t *idx = batch_indices.CUDAMutableData(context.GetPlace()); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); hipLaunchKernelGGL(( RowConvForwardSharedMemory< T>), dim3(grid_dim), dim3(block_dim), mem_per_block, stream, in, weight, num_sequence, input_dim, future_context, idx, out); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); hipLaunchKernelGGL(( RowConvForward<T>), dim3(grid_dim), dim3(block_dim), 0, stream, in, weight, num_sequence, input_dim, future_context, idx, out); } } }; template <typename T> class RowConvGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<Tensor>("Filter"); auto *dOut = context.Input<LoDTensor>(framework::GradVarName("Out")); const T *in = X->data<T>(); const T *weights = Filter->data<T>(); const T *dout = dOut->data<T>(); Tensor *dX = 
context.Output<LoDTensor>(framework::GradVarName("X")); Tensor *dFilter = context.Output<Tensor>(framework::GradVarName("Filter")); auto batch_indices = X->lod()[0]; int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; size_t *idx = batch_indices.CUDAMutableData(context.GetPlace()); auto &device_ctx = context.cuda_device_context(); math::SetConstant<platform::CUDADeviceContext, T> zero; if (dFilter) { T *dfilter = dFilter->mutable_data<T>(context.GetPlace()); zero(device_ctx, dFilter, static_cast<T>(0.0)); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_y * block_x + block_y * (block_x + future_context - 1) + future_context * block_y) * sizeof(T); hipLaunchKernelGGL(( RowConvGradFilterImproved< T>), dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_x * block_y * 2) * sizeof(T); // For 2 arrays of size 32x32 hipLaunchKernelGGL(( RowConvGradFilter< T>), dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } } if (dX) { T *din = dX->mutable_data<T>(context.GetPlace()); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); hipLaunchKernelGGL(( RowConvGradInputSharedMemory< T>), dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), dout, weights, num_sequence, input_dim, future_context, idx, din); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); hipLaunchKernelGGL(( RowConvGradInput<T>), dim3(grid_dim), dim3(block_dim), 0, device_ctx.stream(), dout, weights, num_sequence, input_dim, future_context, idx, din); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( row_conv, ops::RowConvKernel<paddle::platform::CUDADeviceContext, float>); REGISTER_OP_CUDA_KERNEL( row_conv_grad, ops::RowConvGradKernel<paddle::platform::CUDADeviceContext, float>);
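Both filter-gradient kernels above reduce a per-thread partial across a 32-wide warp by halving the shuffle offset (blockDim.x is 32, so one warp spans x). The bare pattern, isolated; note the files go through Paddle's platform::__shfl_down_sync wrapper with a 0 mask, while a full-warp mask is assumed here:

// Sums `val` across one 32-thread warp; lane 0 ends up holding the total.
__device__ float warp_sum(float val) {
  for (int offset = 16; offset > 0; offset >>= 1) {
    val += __shfl_down_sync(0xffffffffu, val, offset);
  }
  return val;  // only lane 0 has the complete sum
}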
6a8df42aedc82fb367fa030e16ec56fbcbcd637e.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/row_conv_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; using framework::Tensor; namespace { inline int DivUp(int x, int y) { return (x + y - 1) / y; } // Forward prop (shared memory version, for small future_context) template <typename T> __global__ void RowConvForwardSharedMemory(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (d < input_dim) ? sw[w * blx + thx] * in[(start + k + w) * input_dim + d] : static_cast<T>(0); } if (d < input_dim) { out[(start + k) * input_dim + d] = sum; } } } } // Forward prop (naive version) template <typename T> __global__ void RowConvForward(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (wt[w * input_dim + d] * in[(start + k + w) * input_dim + d]); } out[(start + k) * input_dim + d] = sum; } } } // Compute input gradient (shared memory version, for small future_context) template <typename T> __global__ void RowConvGradInputSharedMemory(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? 
wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (d < input_dim) ? (sw[w * blx + thx] * dout[(k + start - w) * input_dim + d]) : static_cast<T>(0); } if (d < input_dim) { din[(k + start) * input_dim + d] = sum; } } } } // Compute input gradient (Naive version) template <typename T> __global__ void RowConvGradInput(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (wt[w * input_dim + d] * dout[(k + start - w) * input_dim + d]); } din[(k + start) * input_dim + d] = sum; } } } // Compute W gradient (small future_context version) template <typename T> __global__ void RowConvGradFilterImproved(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; int xdim_sh_in = block_y; int xdim_sh_dout = block_y; // int xdim_sh_dfilter = future_context; int ydim_sh_in = block_x; int ydim_sh_dout = block_x + future_context - 1; int ydim_sh_dfilter = block_y; T *sh_in = mem; T *sh_dout = &mem[xdim_sh_in * ydim_sh_in]; T *sh_dfilter = &mem[xdim_sh_in * ydim_sh_in + xdim_sh_dout * ydim_sh_dout]; if (thy < future_context) { sh_dfilter[thy * ydim_sh_dfilter + thx] = static_cast<T>(0); } __syncthreads(); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * ydim_sh_in + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : T(0); sh_dout[thx * ydim_sh_dout + thy + future_context - 1] = (d < input_dim && pos < end) ? dout[pos * input_dim + d] : T(0); __syncthreads(); if (thy < future_context - 1) { int pos_offset = pos - future_context + 1; sh_dout[thx * ydim_sh_dout + thy] = (d < input_dim && pos_offset >= start) ? dout[pos_offset * input_dim + d] : T(0); } __syncthreads(); for (int w = 0; w < future_context; w++) { T val = sh_in[thy * ydim_sh_in + thx] * sh_dout[thy * ydim_sh_dout + thx + future_context - 1 - w]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. 
val += platform::__shfl_down_sync(0, val, offset); } __syncthreads(); if (thx == 0) { sh_dfilter[w * ydim_sh_dfilter + thy] += val; } __syncthreads(); } } } for (int w = thy; (w < future_context) && (d < input_dim); w += bly) { dfilter[w * input_dim + d] += sh_dfilter[w * ydim_sh_dfilter + thx]; } } // Compute weight(filter) gradient template <typename T> __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; T *sh_in = mem; T *sh_dout = &mem[block_x * block_y]; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * block_y + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : 0.0; __syncthreads(); for (int w = 0; w < future_context; w++) { sh_dout[thx * block_y + thy] = (d < input_dim && (k - w) >= 0 && (k - w) < current_timesteps) ? dout[(pos - w) * input_dim + d] : 0.0; __syncthreads(); T val = sh_in[thy * block_y + thx] * sh_dout[thy * block_y + thx]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. val += platform::__shfl_down_sync(0, val, offset); } __syncthreads(); if (thx == 0 && (gx + thy) < input_dim) { dfilter[w * input_dim + gx + thy] += val; } } } } } } // namespace template <typename T> class RowConvKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<Tensor>("Filter"); auto *Out = context.Output<LoDTensor>("Out"); const T *in = X->data<T>(); const T *weight = Filter->data<T>(); T *out = Out->mutable_data<T>(context.GetPlace()); auto batch_indices = X->lod()[0]; int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; size_t *idx = batch_indices.CUDAMutableData(context.GetPlace()); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); RowConvForwardSharedMemory< T><<<grid_dim, block_dim, mem_per_block, stream>>>( in, weight, num_sequence, input_dim, future_context, idx, out); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); RowConvForward<T><<<grid_dim, block_dim, 0, stream>>>( in, weight, num_sequence, input_dim, future_context, idx, out); } } }; template <typename T> class RowConvGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<Tensor>("Filter"); auto *dOut = context.Input<LoDTensor>(framework::GradVarName("Out")); const T *in = X->data<T>(); const T *weights = Filter->data<T>(); const T *dout = dOut->data<T>(); Tensor *dX = context.Output<LoDTensor>(framework::GradVarName("X")); Tensor *dFilter = 
context.Output<Tensor>(framework::GradVarName("Filter")); auto batch_indices = X->lod()[0]; int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; size_t *idx = batch_indices.CUDAMutableData(context.GetPlace()); auto &device_ctx = context.cuda_device_context(); math::SetConstant<platform::CUDADeviceContext, T> zero; if (dFilter) { T *dfilter = dFilter->mutable_data<T>(context.GetPlace()); zero(device_ctx, dFilter, static_cast<T>(0.0)); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_y * block_x + block_y * (block_x + future_context - 1) + future_context * block_y) * sizeof(T); RowConvGradFilterImproved< T><<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_x * block_y * 2) * sizeof(T); // For 2 arrays of size 32x32 RowConvGradFilter< T><<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } } if (dX) { T *din = dX->mutable_data<T>(context.GetPlace()); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); RowConvGradInputSharedMemory< T><<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( dout, weights, num_sequence, input_dim, future_context, idx, din); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); RowConvGradInput<T><<<grid_dim, block_dim, 0, device_ctx.stream()>>>( dout, weights, num_sequence, input_dim, future_context, idx, din); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( row_conv, ops::RowConvKernel<paddle::platform::CUDADeviceContext, float>); REGISTER_OP_CUDA_KERNEL( row_conv_grad, ops::RowConvGradKernel<paddle::platform::CUDADeviceContext, float>);
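As a sanity check, the forward pass implemented by RowConvForward and RowConvForwardSharedMemory above can be reproduced on the host in a few lines: within each sequence delimited by the LoD offsets in batch_indices, every timestep mixes up to future_context of its own future timesteps, weighted per input dimension. The sketch below is a minimal CPU reference under that reading of the kernels; the function name row_conv_cpu is illustrative and not part of the Paddle API.

#include <cstddef>
#include <vector>

// Hypothetical host reference for the row_conv forward pass:
// out[t][d] = sum over w of wt[w][d] * in[t + w][d], with w < future_context
// and t + w staying inside the current sequence.
template <typename T>
void row_conv_cpu(const std::vector<T> &in, const std::vector<T> &wt,
                  const std::vector<size_t> &batch_indices,  // LoD offsets, length num_sequence + 1
                  int input_dim, int future_context, std::vector<T> &out) {
  out.assign(in.size(), T(0));
  for (size_t i = 0; i + 1 < batch_indices.size(); i++) {
    int start = static_cast<int>(batch_indices[i]);
    int end = static_cast<int>(batch_indices[i + 1]);
    int current_timesteps = end - start;
    for (int k = 0; k < current_timesteps; k++) {
      for (int d = 0; d < input_dim; d++) {
        T sum = 0;
        // Same bounds as the kernel: (k + w) < current_timesteps keeps the
        // window from crossing into the next sequence.
        for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) {
          sum += wt[w * input_dim + d] * in[(start + k + w) * input_dim + d];
        }
        out[(start + k) * input_dim + d] = sum;
      }
    }
  }
}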
370a4ee5f716fc898790a5b86c1f87548fabffd6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Transpose the rowCount x colCount matrix A_d into the colCount x rowCount
// matrix T_d, one element per thread.
__global__ void matrixTranspose(unsigned int *A_d, unsigned int *T_d, int rowCount, int colCount)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < rowCount && col < colCount) {
        T_d[col * rowCount + row] = A_d[row * colCount + col];
    }
}
370a4ee5f716fc898790a5b86c1f87548fabffd6.cu
#include "includes.h" __global__ void matrixTranspose(unsigned int* A_d, unsigned int *T_d, int rowCount, int colCount) { //@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ // **** Populate vecADD kernel function **** //@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row < rowCount && col < colCount){ T_d[col*rowCount+row] = A_d[row*colCount+col]; } }
euler3d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper //////////////////////////////////////////////////////////////////////////////////////////////////// // file: altis\src\cuda\level2\cfd\euler3d.cu // // summary: Sort class // // origin: Rodinia Benchmark (http://rodinia.cs.virginia.edu/doku.php) //////////////////////////////////////////////////////////////////////////////////////////////////// //#include <cutil.h> #include <iostream> #include <fstream> #include "cudacommon.h" #include "ResultDatabase.h" #include "OptionParser.h" //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Number streams. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NUM_STREAMS 2 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines seed. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define SEED 7 /* /// <summary> . </summary> * Options * */ #define GAMMA 1.4f //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines iterations. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define iterations 10 // #ifndef block_length // #define block_length 192 // #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines ndim. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NDIM 3 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines nnb. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NNB 4 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines rk. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define RK 3 // 3rd order RK //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines ff mach. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define ff_mach 1.2f //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Degrees angle of attack. </summary> /// /// <remarks> Ed, 5/20/2020. 
</remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define deg_angle_of_attack 0.0f /* * not options */ #ifdef RD_WG_SIZE_0_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 192 #endif #ifdef RD_WG_SIZE_1_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 192 #endif #ifdef RD_WG_SIZE_2_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. 
</remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 RD_WG_SIZE_2_0 #elif defined(RD_WG_SIZE_1) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 RD_WG_SIZE_2 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 192 #endif #ifdef RD_WG_SIZE_3_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 RD_WG_SIZE_3_0 #elif defined(RD_WG_SIZE_3) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 RD_WG_SIZE_3 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 192 #endif #ifdef RD_WG_SIZE_4_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 RD_WG_SIZE_4_0 #elif defined(RD_WG_SIZE_4) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. </summary> /// /// <remarks> Ed, 5/20/2020. 
</remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 RD_WG_SIZE_4 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 192 #endif // #if block_length > 128 // #warning "the kernels may fail too launch on some systems if the block length is too large" // #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Variable density. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define VAR_DENSITY 0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Variable momentum. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define VAR_MOMENTUM 1 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Variable density energy. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines nvar. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NVAR (VAR_DENSITY_ENERGY+1) /// <summary> The kernel time. </summary> float kernelTime = 0.0f; /// <summary> The transfer time. </summary> float transferTime = 0.0f; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the stop. </summary> /// /// <value> The stop. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// hipEvent_t start, stop; /// <summary> The elapsed. </summary> float elapsed; /* //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Allocs. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="N"> An int to process. </param> /// /// <returns> Null if it fails, else a pointer to a T. 
</returns> //////////////////////////////////////////////////////////////////////////////////////////////////// * Generic functions */ template <typename T> T* alloc(int N) { T* t; CUDA_SAFE_CALL(hipMalloc((void**)&t, sizeof(T)*N)); return t; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Deallocs the given array. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="array"> [in,out] If non-null, the array. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void dealloc(T* array) { CUDA_SAFE_CALL(hipFree((void*)array)); } #ifdef HYPERQ //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Copies this. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. </param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> /// <param name="stream"> [in,out] If non-null, the stream. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void copy(T* dst, T* src, int N, hipStream_t *stream) { hipEventRecord(start, 0); CUDA_SAFE_CALL(hipMemcpyAsync((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice, stream[1])); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Copies this. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. </param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void copy(T* dst, T* src, int N) { hipEventRecord(start, 0); CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Uploads. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. </param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void upload(T* dst, T* src, int N) { hipEventRecord(start, 0); CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Downloads this. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. 
</param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void download(T* dst, T* src, int N) { hipEventRecord(start, 0); CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Dumps. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="nel"> The nel. </param> /// <param name="nelr"> The nelr. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff variable[ nvar]. </summary> /// /// <value> The ff variable[ nvar]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution momentum x[ 1]. </summary> /// /// <value> The ff flux contribution momentum x[ 1]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_momentum_x[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution momentum y[ 1]. </summary> /// /// <value> The ff flux contribution momentum y[ 1]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_momentum_y[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution momentum z[ 1]. </summary> /// /// <value> The ff flux contribution momentum z[ 1]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_momentum_z[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution density energy[ 1]. </summary> /// /// <value> The ff flux contribution density energy[ 1]. 
</value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_density_energy[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda initialize variables. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Initializes the variables. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the flux contribution. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="momentum"> [in,out] The momentum. </param> /// <param name="density_energy"> [in,out] The density energy. </param> /// <param name="pressure"> [in,out] The pressure. </param> /// <param name="velocity"> [in,out] The velocity. </param> /// <param name="fc_momentum_x"> [in,out] The fc momentum x coordinate. </param> /// <param name="fc_momentum_y"> [in,out] The fc momentum y coordinate. </param> /// <param name="fc_momentum_z"> [in,out] The fc momentum z coordinate. </param> /// <param name="fc_density_energy"> [in,out] The fc density energy. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the velocity. </summary> /// /// <remarks> Ed, 5/20/2020. 
</remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="momentum"> [in,out] The momentum. </param> /// <param name="velocity"> [in,out] The velocity. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the speed sqd. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="velocity"> [in,out] The velocity. </param> /// /// <returns> The calculated speed sqd. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the pressure. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="density_energy"> [in,out] The density energy. </param> /// <param name="speed_sqd"> [in,out] The speed sqd. </param> /// /// <returns> The calculated pressure. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the speed of sound. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="pressure"> [in,out] The pressure. </param> /// /// <returns> The calculated speed of sound. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda compute step factor. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="areas"> [in,out] If non-null, the areas. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } #ifdef HYPERQ //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the step factor. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="areas"> [in,out] If non-null, the areas. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> /// <param name="stream"> [in,out] If non-null, the stream. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors, hipStream_t *stream) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, stream[0], nelr, variables, areas, step_factors); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the step factor. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="areas"> [in,out] If non-null, the areas. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda compute flux. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="elements_surrounding_elements"> [in,out] If non-null, the elements /// surrounding elements. 
</param> /// <param name="normals"> [in,out] If non-null, the normals. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); 
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = 
flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the flux. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="elements_surrounding_elements"> [in,out] If non-null, the elements /// surrounding elements. </param> /// <param name="normals"> [in,out] If non-null, the normals. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda time step. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="j"> An int to process. </param> /// <param name="nelr"> The nelr. </param> /// <param name="old_variables"> [in,out] If non-null, the old variables. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Time step. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="j"> An int to process. </param> /// <param name="nelr"> The nelr. </param> /// <param name="old_variables"> [in,out] If non-null, the old variables. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Adds a benchmark specifier options. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="op"> [in,out] The operation. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void addBenchmarkSpecOptions(OptionParser &op) { } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cfds. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void cfd(ResultDatabase &resultDB, OptionParser &op); //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Executes the benchmark operation. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { printf("Running CFDSolver\n"); bool quiet = op.getOptionBool("quiet"); if(!quiet) { printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4); } hipEventCreate(&start); hipEventCreate(&stop); int passes = op.getOptionInt("passes"); for(int i = 0; i < passes; i++) { kernelTime = 0.0f; transferTime = 0.0f; if(!quiet) { printf("Pass %d:\n", i); } cfd(resultDB, op); if(!quiet) { printf("Done.\n"); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cfds. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// void cfd(ResultDatabase &resultDB, OptionParser &op) { // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu hipEventRecord(start, 0); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; { string inputFile = op.getOptionString("inputFile"); std::ifstream file(inputFile.c_str()); if(inputFile != "") { file >> nel; } else { int problemSizes[4] = {97000, 200000, 40000000, 60000000}; nel = problemSizes[op.getOptionInt("size") - 1]; } nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ ::min(1, nel % BLOCK_SIZE_0)); #ifdef UNIFIED_MEMORY // could use prefetch and advise float *h_areas = NULL; CUDA_SAFE_CALL(hipMallocManaged(&h_areas, nelr * sizeof(float))); int *h_elements_surrounding_elements = NULL; CUDA_SAFE_CALL(hipMallocManaged(&h_elements_surrounding_elements, nelr * NNB * sizeof(int))); float *h_normals = NULL; CUDA_SAFE_CALL(hipMallocManaged(&h_normals, nelr * NDIM * NNB * sizeof(float))); #else float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; #endif srand(SEED); // read in data for(int i = 0; i < nel; i++) { if(inputFile != "") { file 
>> h_areas[i]; } else { h_areas[i] = 1.0 * rand() / RAND_MAX; } for(int j = 0; j < NNB; j++) // NNB is always 4 { if(inputFile != "") { file >> h_elements_surrounding_elements[i + j*nelr]; } else { int val = i + (rand() % 20) - 10; h_elements_surrounding_elements[i + j * nelr] = val; } if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) // NDIM is always 3 { if(inputFile != "") { file >> h_normals[i + (j + k*NNB)*nelr]; } else { h_normals[i + (j + k*NNB)*nelr] = 1.0 * rand() / RAND_MAX - 0.5; } h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } #ifdef UNIFIED_MEMORY areas = h_areas; elements_surrounding_elements = h_elements_surrounding_elements; normals = h_normals; #else areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; #endif } // Create arrays and set initial conditions #ifdef UNIFIED_MEMORY float *variables = NULL; CUDA_SAFE_CALL(hipMallocManaged(&variables, nelr*NVAR*sizeof(float))); #else float* variables = alloc<float>(nelr*NVAR); #endif initialize_variables(nelr, variables); #ifdef UNIFIED_MEMORY float *old_variables = NULL; CUDA_SAFE_CALL(hipMallocManaged(&old_variables, nelr*NVAR*sizeof(float))); float *fluxes = NULL; CUDA_SAFE_CALL(hipMallocManaged(&fluxes, nelr*NVAR*sizeof(float))); float *step_factors = NULL; CUDA_SAFE_CALL(hipMallocManaged(&step_factors, nelr*sizeof(float))); #else float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); #endif // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); hipMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing hipDeviceSynchronize(); // these need to be computed the first time in order to compute time step // unsigned int timer = 0; // CUT_SAFE_CALL( cutCreateTimer( &timer)); // CUT_SAFE_CALL( cutStartTimer( timer)); // Begin iterations #ifdef HYPERQ // Only 2 here, may change later hipStream_t streams[NUM_STREAMS]; for (int s = 0; s < NUM_STREAMS; s++) { CUDA_SAFE_CALL(hipStreamCreate(&streams[s])); } #endif for(int i = 0; i < iterations; i++) { // Time will need to be recomputed, more aggressive optimization TODO #ifdef HYPERQ copy<float>(old_variables, variables, nelr*NVAR, streams); compute_step_factor(nelr, variables, areas, step_factors, streams); #else copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); #endif CHECK_CUDA_ERROR(); for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, 
variables, fluxes); CHECK_CUDA_ERROR(); time_step(j, nelr, old_variables, variables, step_factors, fluxes); CHECK_CUDA_ERROR(); } } hipDeviceSynchronize(); // CUT_SAFE_CALL( cutStopTimer(timer) ); if(op.getOptionBool("verbose")) { dump(variables, nel, nelr); } #ifdef HYPERQ // Only 2 here, may change later for (int s = 0; s < NUM_STREAMS; s++) { CUDA_SAFE_CALL(hipStreamDestroy(streams[s])); } #endif #ifdef UNIFIED_MEMORY CUDA_SAFE_CALL(hipFree(areas)); CUDA_SAFE_CALL(hipFree(elements_surrounding_elements)); CUDA_SAFE_CALL(hipFree(normals)); CUDA_SAFE_CALL(hipFree(variables)); CUDA_SAFE_CALL(hipFree(old_variables)); CUDA_SAFE_CALL(hipFree(fluxes)); CUDA_SAFE_CALL(hipFree(step_factors)); #else dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); #endif char atts[1024]; sprintf(atts, "numelements:%d", nel); resultDB.AddResult("cfd_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("cfd_transfer_time", atts, "sec", transferTime); resultDB.AddResult("cfd_parity", atts, "N", transferTime / kernelTime); resultDB.AddOverall("Time", "sec", kernelTime+transferTime); }
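// ----------------------------------------------------------------------------
// Editorial sketch: cfd() above repeats the same event-timing pattern around
// every kernel launch and transfer (record start, do the work, record stop,
// synchronize, accumulate elapsed seconds). A minimal helper capturing that
// pattern is sketched below in CUDA spelling; the HIP file above uses the
// identical calls with hip* names. The name timeSection and its lambda-based
// interface are illustrative assumptions, not part of the benchmark source.
template <typename F>
float timeSection(cudaEvent_t start, cudaEvent_t stop, F work) {
    float ms = 0.0f;
    cudaEventRecord(start, 0);              // mark the beginning of the timed section
    work();                                 // launch kernels / issue copies here
    cudaEventRecord(stop, 0);               // mark the end of the timed section
    cudaEventSynchronize(stop);             // block until the stop event completes
    cudaEventElapsedTime(&ms, start, stop); // elapsed time, in milliseconds
    return ms * 1.e-3f;                     // seconds, matching kernelTime/transferTime
}
// Usage (hypothetical): kernelTime += timeSection(start, stop, [&] {
//     cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); });
// ----------------------------------------------------------------------------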
euler3d.cu
// Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper //////////////////////////////////////////////////////////////////////////////////////////////////// // file: altis\src\cuda\level2\cfd\euler3d.cu // // summary: 3D Euler CFD solver // // origin: Rodinia Benchmark (http://rodinia.cs.virginia.edu/doku.php) //////////////////////////////////////////////////////////////////////////////////////////////////// //#include <cutil.h> #include <iostream> #include <fstream> #include "cudacommon.h" #include "ResultDatabase.h" #include "OptionParser.h" //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Number streams. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NUM_STREAMS 2 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines seed. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define SEED 7 /* /// <summary> . </summary> * Options * */ #define GAMMA 1.4f //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines iterations. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define iterations 10 // #ifndef block_length // #define block_length 192 // #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines ndim. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NDIM 3 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines nnb. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NNB 4 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines rk. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define RK 3 // 3rd order RK //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines ff mach. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define ff_mach 1.2f //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Degrees angle of attack. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define deg_angle_of_attack 0.0f /* * not options */ #ifdef RD_WG_SIZE_0_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. 
</remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 0. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_0 192 #endif #ifdef RD_WG_SIZE_1_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 1. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_1 192 #endif #ifdef RD_WG_SIZE_2_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 RD_WG_SIZE_2_0 #elif defined(RD_WG_SIZE_2) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. 
</remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 RD_WG_SIZE_2 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 2. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2 192 #endif #ifdef RD_WG_SIZE_3_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 RD_WG_SIZE_3_0 #elif defined(RD_WG_SIZE_3) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 RD_WG_SIZE_3 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 3. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_3 192 #endif #ifdef RD_WG_SIZE_4_0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 RD_WG_SIZE_4_0 #elif defined(RD_WG_SIZE_4) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 RD_WG_SIZE_4 #elif defined(RD_WG_SIZE) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 RD_WG_SIZE #else //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines block size 4. 
</summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_4 192 #endif // #if block_length > 128 // #warning "the kernels may fail too launch on some systems if the block length is too large" // #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Variable density. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define VAR_DENSITY 0 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Variable momentum. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define VAR_MOMENTUM 1 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Variable density energy. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines nvar. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define NVAR (VAR_DENSITY_ENERGY+1) /// <summary> The kernel time. </summary> float kernelTime = 0.0f; /// <summary> The transfer time. </summary> float transferTime = 0.0f; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the stop. </summary> /// /// <value> The stop. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// cudaEvent_t start, stop; /// <summary> The elapsed. </summary> float elapsed; /* //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Allocs. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="N"> An int to process. </param> /// /// <returns> Null if it fails, else a pointer to a T. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// * Generic functions */ template <typename T> T* alloc(int N) { T* t; CUDA_SAFE_CALL(cudaMalloc((void**)&t, sizeof(T)*N)); return t; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Deallocs the given array. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="array"> [in,out] If non-null, the array. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void dealloc(T* array) { CUDA_SAFE_CALL(cudaFree((void*)array)); } #ifdef HYPERQ //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Copies this. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. 
</param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> /// <param name="stream"> [in,out] If non-null, the stream. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void copy(T* dst, T* src, int N, cudaStream_t *stream) { cudaEventRecord(start, 0); CUDA_SAFE_CALL(cudaMemcpyAsync((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice, stream[1])); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Copies this. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. </param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void copy(T* dst, T* src, int N) { cudaEventRecord(start, 0); CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Uploads. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. </param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void upload(T* dst, T* src, int N) { cudaEventRecord(start, 0); CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Downloads this. </summary> /// /// <typeparam name="T"> Generic type parameter. </typeparam> /// <param name="dst"> [in,out] If non-null, destination for the. </param> /// <param name="src"> [in,out] If non-null, source for the. </param> /// <param name="N"> An int to process. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> void download(T* dst, T* src, int N) { cudaEventRecord(start, 0); CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Dumps. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="nel"> The nel. </param> /// <param name="nelr"> The nelr. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff variable[ nvar]. </summary> /// /// <value> The ff variable[ nvar]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution momentum x[ 1]. </summary> /// /// <value> The ff flux contribution momentum x[ 1]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_momentum_x[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution momentum y[ 1]. </summary> /// /// <value> The ff flux contribution momentum y[ 1]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_momentum_y[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution momentum z[ 1]. </summary> /// /// <value> The ff flux contribution momentum z[ 1]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_momentum_z[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Gets the ff flux contribution density energy[ 1]. </summary> /// /// <value> The ff flux contribution density energy[ 1]. </value> //////////////////////////////////////////////////////////////////////////////////////////////////// __constant__ float3 ff_flux_contribution_density_energy[1]; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda initialize variables. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Initializes the variables. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1); cudaEventRecord(start, 0); cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the flux contribution. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="momentum"> [in,out] The momentum. </param> /// <param name="density_energy"> [in,out] The density energy. </param> /// <param name="pressure"> [in,out] The pressure. </param> /// <param name="velocity"> [in,out] The velocity. </param> /// <param name="fc_momentum_x"> [in,out] The fc momentum x coordinate. </param> /// <param name="fc_momentum_y"> [in,out] The fc momentum y coordinate. </param> /// <param name="fc_momentum_z"> [in,out] The fc momentum z coordinate. </param> /// <param name="fc_density_energy"> [in,out] The fc density energy. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the velocity. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="momentum"> [in,out] The momentum. </param> /// <param name="velocity"> [in,out] The velocity. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the speed sqd. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="velocity"> [in,out] The velocity. </param> /// /// <returns> The calculated speed sqd. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the pressure. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="density_energy"> [in,out] The density energy. </param> /// <param name="speed_sqd"> [in,out] The speed sqd. </param> /// /// <returns> The calculated pressure. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the speed of sound. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="density"> [in,out] The density. </param> /// <param name="pressure"> [in,out] The pressure. </param> /// /// <returns> The calculated speed of sound. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda compute step factor. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="areas"> [in,out] If non-null, the areas. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } #ifdef HYPERQ //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the step factor. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="areas"> [in,out] If non-null, the areas. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> /// <param name="stream"> [in,out] If non-null, the stream. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors, cudaStream_t *stream) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); cudaEventRecord(start, 0); cuda_compute_step_factor<<<Dg, Db, 0, stream[0]>>>(nelr, variables, areas, step_factors); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the step factor. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="areas"> [in,out] If non-null, the areas. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); cudaEventRecord(start, 0); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda compute flux. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="elements_surrounding_elements"> [in,out] If non-null, the elements /// surrounding elements. </param> /// <param name="normals"> [in,out] If non-null, the normals. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += 
factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Calculates the flux. 
</summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="nelr"> The nelr. </param> /// <param name="elements_surrounding_elements"> [in,out] If non-null, the elements /// surrounding elements. </param> /// <param name="normals"> [in,out] If non-null, the normals. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3); cudaEventRecord(start, 0); cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cuda time step. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="j"> An int to process. </param> /// <param name="nelr"> The nelr. </param> /// <param name="old_variables"> [in,out] If non-null, the old variables. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Time step. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="j"> An int to process. </param> /// <param name="nelr"> The nelr. </param> /// <param name="old_variables"> [in,out] If non-null, the old variables. </param> /// <param name="variables"> [in,out] If non-null, the variables. </param> /// <param name="step_factors"> [in,out] If non-null, the step factors. </param> /// <param name="fluxes"> [in,out] If non-null, the fluxes. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4); cudaEventRecord(start, 0); cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Adds a benchmark specifier options. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="op"> [in,out] The operation. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void addBenchmarkSpecOptions(OptionParser &op) { } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cfds. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void cfd(ResultDatabase &resultDB, OptionParser &op); //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Executes the benchmark operation. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { printf("Running CFDSolver\n"); bool quiet = op.getOptionBool("quiet"); if(!quiet) { printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4); } cudaEventCreate(&start); cudaEventCreate(&stop); int passes = op.getOptionInt("passes"); for(int i = 0; i < passes; i++) { kernelTime = 0.0f; transferTime = 0.0f; if(!quiet) { printf("Pass %d:\n", i); } cfd(resultDB, op); if(!quiet) { printf("Done.\n"); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Cfds. </summary> /// /// <remarks> Ed, 5/20/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// void cfd(ResultDatabase &resultDB, OptionParser &op) { // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu cudaEventRecord(start, 0); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) ); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; { string inputFile = op.getOptionString("inputFile"); std::ifstream file(inputFile.c_str()); if(inputFile != "") { file >> nel; } else { int problemSizes[4] = {97000, 200000, 40000000, 60000000}; nel = problemSizes[op.getOptionInt("size") - 1]; } nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ std::min(1, nel % BLOCK_SIZE_0)); #ifdef UNIFIED_MEMORY // could use prefetch and advise float *h_areas = NULL; CUDA_SAFE_CALL(cudaMallocManaged(&h_areas, nelr * sizeof(float))); int *h_elements_surrounding_elements = NULL; CUDA_SAFE_CALL(cudaMallocManaged(&h_elements_surrounding_elements, nelr * NNB * sizeof(int))); float *h_normals = NULL; CUDA_SAFE_CALL(cudaMallocManaged(&h_normals, nelr * NDIM * NNB * sizeof(float))); #else float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; #endif srand(SEED); // read in data for(int i = 0; i < nel; i++) { if(inputFile 
!= "") { file >> h_areas[i]; } else { h_areas[i] = 1.0 * rand() / RAND_MAX; } for(int j = 0; j < NNB; j++) // NNB is always 4 { if(inputFile != "") { file >> h_elements_surrounding_elements[i + j*nelr]; } else { int val = i + (rand() % 20) - 10; h_elements_surrounding_elements[i + j * nelr] = val; } if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) // NDIM is always 3 { if(inputFile != "") { file >> h_normals[i + (j + k*NNB)*nelr]; } else { h_normals[i + (j + k*NNB)*nelr] = 1.0 * rand() / RAND_MAX - 0.5; } h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } #ifdef UNIFIED_MEMORY areas = h_areas; elements_surrounding_elements = h_elements_surrounding_elements; normals = h_normals; #else areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; #endif } // Create arrays and set initial conditions #ifdef UNIFIED_MEMORY float *variables = NULL; CUDA_SAFE_CALL(cudaMallocManaged(&variables, nelr*NVAR*sizeof(float))); #else float* variables = alloc<float>(nelr*NVAR); #endif initialize_variables(nelr, variables); #ifdef UNIFIED_MEMORY float *old_variables = NULL; CUDA_SAFE_CALL(cudaMallocManaged(&old_variables, nelr*NVAR*sizeof(float))); float *fluxes = NULL; CUDA_SAFE_CALL(cudaMallocManaged(&fluxes, nelr*NVAR*sizeof(float))); float *step_factors = NULL; CUDA_SAFE_CALL(cudaMallocManaged(&step_factors, nelr*sizeof(float))); #else float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); #endif // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing cudaDeviceSynchronize(); // these need to be computed the first time in order to compute time step // unsigned int timer = 0; // CUT_SAFE_CALL( cutCreateTimer( &timer)); // CUT_SAFE_CALL( cutStartTimer( timer)); // Begin iterations #ifdef HYPERQ // Only 2 here, may change later cudaStream_t streams[NUM_STREAMS]; for (int s = 0; s < NUM_STREAMS; s++) { CUDA_SAFE_CALL(cudaStreamCreate(&streams[s])); } #endif for(int i = 0; i < iterations; i++) { // Time will need to be recomputed, more aggressive optimization TODO #ifdef HYPERQ copy<float>(old_variables, variables, nelr*NVAR, streams); compute_step_factor(nelr, variables, areas, step_factors, streams); #else copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); #endif CHECK_CUDA_ERROR(); for(int j = 0; j < RK; j++) { compute_flux(nelr, 
elements_surrounding_elements, normals, variables, fluxes); CHECK_CUDA_ERROR(); time_step(j, nelr, old_variables, variables, step_factors, fluxes); CHECK_CUDA_ERROR(); } } cudaDeviceSynchronize(); // CUT_SAFE_CALL( cutStopTimer(timer) ); if(op.getOptionBool("verbose")) { dump(variables, nel, nelr); } #ifdef HYPERQ // Only 2 here, may change later for (int s = 0; s < NUM_STREAMS; s++) { CUDA_SAFE_CALL(cudaStreamDestroy(streams[s])); } #endif #ifdef UNIFIED_MEMORY CUDA_SAFE_CALL(cudaFree(areas)); CUDA_SAFE_CALL(cudaFree(elements_surrounding_elements)); CUDA_SAFE_CALL(cudaFree(normals)); CUDA_SAFE_CALL(cudaFree(variables)); CUDA_SAFE_CALL(cudaFree(old_variables)); CUDA_SAFE_CALL(cudaFree(fluxes)); CUDA_SAFE_CALL(cudaFree(step_factors)); #else dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); #endif char atts[1024]; sprintf(atts, "numelements:%d", nel); resultDB.AddResult("cfd_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("cfd_transfer_time", atts, "sec", transferTime); resultDB.AddResult("cfd_parity", atts, "N", transferTime / kernelTime); resultDB.AddOverall("Time", "sec", kernelTime+transferTime); }
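// ----------------------------------------------------------------------------
// Editorial sketch: two conventions the solver above depends on, shown as a
// small standalone host-only program (separate from the file above). First,
// nelr rounds nel up to the next multiple of BLOCK_SIZE_0, so every kernel
// launches exactly nelr/BLOCK_SIZE threads with no bounds checks; the padded
// tail elements just duplicate the last real element. Second, the solution
// arrays use a structure-of-arrays layout: variable v of element i lives at
// index i + v*nelr, so adjacent threads touch adjacent addresses (coalesced).
// The constants below assume the default BLOCK_SIZE_0 of 192 and the smallest
// problem size; this is an illustration, not part of the benchmark.
#include <cstdio>
int main() {
    const int block = 192;   // BLOCK_SIZE_0 default
    const int nel = 97000;   // problemSizes[0]
    const int nelr = block * ((nel / block) + (nel % block ? 1 : 0));
    std::printf("nel=%d -> nelr=%d (%d blocks of %d)\n", nel, nelr, nelr / block, block);
    const int i = 10, v = 4; // v = 4 is VAR_DENSITY_ENERGY when NDIM == 3
    std::printf("variables[%d] holds variable %d of element %d\n", i + v * nelr, v, i);
    return 0;
}
// ----------------------------------------------------------------------------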
4a53990c01ae7da42f3208163fb26dcd5f775317.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids at random positions in the simulation space. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. hipDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. */ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. 
*/ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2: boids try to stay a distance d away from each other // Rule 3: boids try to match the speed of surrounding boids return glm::vec3(0.0f, 0.0f, 0.0f); } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, compute a new velocity from the boids rules and record it. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // Compute a new velocity based on pos and vel1 // Clamp the speed // Record the new velocity into vel2. Question: why NOT vel1? } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. // TODO-1.2 ping-pong the velocity buffers } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. 
// In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
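The three rules stubbed out in computeVelocityChange above follow a well-known pattern. As an illustration only (a minimal sketch, not the assignment's reference solution), a brute-force implementation using the rule*Distance and rule*Scale constants already defined in this file could look like:

__device__ glm::vec3 computeVelocityChangeSketch(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
    glm::vec3 perceivedCenter(0.0f), separation(0.0f), perceivedVelocity(0.0f);
    int rule1Neighbors = 0, rule3Neighbors = 0;
    const glm::vec3 selfPos = pos[iSelf];
    for (int i = 0; i < N; i++) {
        if (i == iSelf) { continue; }
        const float dist = glm::distance(pos[i], selfPos);
        if (dist < rule1Distance) { perceivedCenter += pos[i]; rule1Neighbors++; }   // Rule 1: cohesion
        if (dist < rule2Distance) { separation -= pos[i] - selfPos; }                // Rule 2: separation
        if (dist < rule3Distance) { perceivedVelocity += vel[i]; rule3Neighbors++; } // Rule 3: alignment
    }
    glm::vec3 deltaV(0.0f);
    if (rule1Neighbors > 0) { deltaV += (perceivedCenter / (float)rule1Neighbors - selfPos) * rule1Scale; }
    deltaV += separation * rule2Scale;
    if (rule3Neighbors > 0) { deltaV += (perceivedVelocity / (float)rule3Neighbors) * rule3Scale; }
    return deltaV;
}

Cohesion and alignment average over the neighbors inside their respective radii while separation accumulates a repulsive offset; the caller is still responsible for clamping the resulting speed to maxSpeed.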
4a53990c01ae7da42f3208163fb26dcd5f775317.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids at random positions throughout the starting area. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. cudaDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. */ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for the calls to the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO CUDA kernels.
*/ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2: boids try to stay a distance d away from each other // Rule 3: boids try to match the speed of surrounding boids return glm::vec3(0.0f, 0.0f, 0.0f); } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, compute a new velocity from the boids rules and record it. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // Compute a new velocity based on pos and vel1 // Clamp the speed // Record the new velocity into vel2. Question: why NOT vel1? } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. // TODO-1.2 ping-pong the velocity buffers } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. 
// In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; cudaFree(dev_intKeys); cudaFree(dev_intValues); checkCUDAErrorWithLine("cudaFree failed!"); return; }
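For the TODO-1.2 step function, the answer to the "why NOT vel1?" question in kernUpdateVelocityBruteForce is that every thread must read its neighbors' old velocities; writing results back into vel1 while other threads are still reading it would be a data race. A sketch of the naive step with the ping-pong swap (assuming <utility> is available for std::swap; again an illustration, not the reference solution):

void stepSimulationNaiveSketch(float dt) {
    dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
    // Read dev_pos/dev_vel1, write dev_vel2: the old velocities stay intact for all readers.
    kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
    kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
    checkCUDAErrorWithLine("stepSimulationNaiveSketch failed!");
    std::swap(dev_vel1, dev_vel2); // ping-pong: the buffer just written becomes next step's input
}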
01e0b3164dd7f4c9d6c1d20d6e5819008ae38cbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hist.cuh" #include "hist_2_one_byte_base.cuh" #include "tuning_policy_enums.cuh" #include <hip/hip_cooperative_groups.h> #include <library/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> using namespace cooperative_groups; namespace NKernel { template <int BlockSize> struct TPointHist2OneByte<5, BlockSize> : public TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize> { using TParent = TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize>; using TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize>::Histogram; __forceinline__ __device__ TPointHist2OneByte(float* buffer) : TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize>(buffer) { } __forceinline__ __device__ int SliceOffset() { const int warpId = (threadIdx.x / 32); const int warpOffset = 1024 * warpId; const int blocks = 4; const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3)); return warpOffset + innerHistStart; } template <int N> __forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* s1, const float* s2) { thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; float stat1[N]; float stat2[N]; #pragma unroll for (int k = 0; k < N; ++k) { stat1[k] = flag ? s2[k] : s1[k]; stat2[k] = flag ? s1[k] : s2[k]; } #pragma unroll for (int i = 0; i < 4; i++) { const int f = ((2 * i + threadIdx.x) & 6); int offsets[N]; bool pass[N]; #pragma unroll for (int k =0; k < N; ++k) { const int bin = (ci[k] >> (24 - (f << 2))) & 255; offsets[k] = f + 32 * (bin & 31); pass[k] = bin != 32; } syncTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { int offset = offsets[k]; const int offset1 = offset + flag; const float add1 = pass[k] ? stat1[k] : 0.0f; Histogram[offset1] += add1; } syncTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { int offset = offsets[k]; const int offset2 = offset + !flag; const float add2 = pass[k] ? stat2[k] : 0.0f; Histogram[offset2] += add2; } } } static constexpr int MaxBits() { return 5; } __forceinline__ __device__ void Reduce() { TParent::ReduceToOneWarp(); if (threadIdx.x < 256) { const int isSecondStat = threadIdx.x & 1; const int f = threadIdx.x / 64; float sum = 0.0f; const int fold = (threadIdx.x >> 1) & 31; const int maxFoldCount = 32; if (fold < maxFoldCount) { const int innerHistCount = 4; const volatile float* __restrict__ src = Histogram + 2048 //warpHistSize + 32 * fold + 2 * f + isSecondStat; #pragma unroll for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) { sum += src[(inWarpHist << 3)]; } Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold] = sum; } } __syncthreads(); } }; DefineHist2Pass(5) }
01e0b3164dd7f4c9d6c1d20d6e5819008ae38cbf.cu
#include "hist.cuh" #include "hist_2_one_byte_base.cuh" #include "tuning_policy_enums.cuh" #include <cooperative_groups.h> #include <library/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> using namespace cooperative_groups; namespace NKernel { template <int BlockSize> struct TPointHist2OneByte<5, BlockSize> : public TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize> { using TParent = TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize>; using TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize>::Histogram; __forceinline__ __device__ TPointHist2OneByte(float* buffer) : TPointHist2OneByteBase<TPointHist2OneByte<5, BlockSize>, BlockSize>(buffer) { } __forceinline__ __device__ int SliceOffset() { const int warpId = (threadIdx.x / 32); const int warpOffset = 1024 * warpId; const int blocks = 4; const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3)); return warpOffset + innerHistStart; } template <int N> __forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* s1, const float* s2) { thread_block_tile<8> syncTile = tiled_partition<8>(this_thread_block()); const bool flag = threadIdx.x & 1; float stat1[N]; float stat2[N]; #pragma unroll for (int k = 0; k < N; ++k) { stat1[k] = flag ? s2[k] : s1[k]; stat2[k] = flag ? s1[k] : s2[k]; } #pragma unroll for (int i = 0; i < 4; i++) { const int f = ((2 * i + threadIdx.x) & 6); int offsets[N]; bool pass[N]; #pragma unroll for (int k =0; k < N; ++k) { const int bin = (ci[k] >> (24 - (f << 2))) & 255; offsets[k] = f + 32 * (bin & 31); pass[k] = bin != 32; } syncTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { int offset = offsets[k]; const int offset1 = offset + flag; const float add1 = pass[k] ? stat1[k] : 0.0f; Histogram[offset1] += add1; } syncTile.sync(); #pragma unroll for (int k = 0; k < N; ++k) { int offset = offsets[k]; const int offset2 = offset + !flag; const float add2 = pass[k] ? stat2[k] : 0.0f; Histogram[offset2] += add2; } } } static constexpr int MaxBits() { return 5; } __forceinline__ __device__ void Reduce() { TParent::ReduceToOneWarp(); if (threadIdx.x < 256) { const int isSecondStat = threadIdx.x & 1; const int f = threadIdx.x / 64; float sum = 0.0f; const int fold = (threadIdx.x >> 1) & 31; const int maxFoldCount = 32; if (fold < maxFoldCount) { const int innerHistCount = 4; const volatile float* __restrict__ src = Histogram + 2048 //warpHistSize + 32 * fold + 2 * f + isSecondStat; #pragma unroll for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) { sum += src[(inWarpHist << 3)]; } Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold] = sum; } } __syncthreads(); } }; DefineHist2Pass(5) }
c7e675f1226bb3f83174ee01aea058195cc0fc71.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "invert.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; uchar4 *data = NULL; hipMalloc(&data, XSIZE*YSIZE); int w = XSIZE; int h = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( invert), dim3(gridBlock),dim3(threadBlock), 0, 0, data,w,h); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( invert), dim3(gridBlock),dim3(threadBlock), 0, 0, data,w,h); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( invert), dim3(gridBlock),dim3(threadBlock), 0, 0, data,w,h); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c7e675f1226bb3f83174ee01aea058195cc0fc71.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "invert.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; uchar4 *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); int w = XSIZE; int h = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); invert<<<gridBlock,threadBlock>>>(data,w,h); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { invert<<<gridBlock,threadBlock>>>(data,w,h); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { invert<<<gridBlock,threadBlock>>>(data,w,h); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
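The invert kernel itself comes from the included invert.cu, which is not part of this listing; a plausible minimal kernel consistent with the invert(data, w, h) launch over a uchar4 buffer (purely an assumption about the missing file) would be:

__global__ void invert(uchar4 *data, int w, int h) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) { return; }
    const uchar4 p = data[y * w + x];
    data[y * w + x] = make_uchar4(255 - p.x, 255 - p.y, 255 - p.z, p.w); // invert RGB, keep alpha
}

Two caveats about the harness itself: the allocation requests XSIZE*YSIZE bytes for what the launch geometry suggests is a w*h array of 4-byte uchar4 elements, so a sizeof(uchar4) factor appears to be missing; and the timed loop never synchronizes before taking the end timestamp, so it largely measures launch enqueue cost and queue backpressure rather than completed kernel time. Adding a cudaDeviceSynchronize() (hipDeviceSynchronize() in the HIP variant) before the final steady_clock::now() would time finished work.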
85292a83ab130bb84230c29766479327a7e74aab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <thrust/random.h> #include <thrust/sort.h> #include <iostream> #include <vector> #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sample_prob.h" #include "paddle/fluid/operators/math/sampler.h" namespace paddle { namespace operators { namespace math { using Tensor = framework::Tensor; template <typename T> __device__ T gpu_adjust_prob(const T prob, const int num_samples, const int num_tries) { if (num_samples == num_tries) { return prob * num_samples; } else { return -expm1(num_tries * log1p(-prob)); } } class GPULogUniformSampler { public: __device__ int64_t Sample(float random, const int range, const float log_range) const; __device__ float Probability(int64_t value, const float log_range) const; }; __device__ int64_t GPULogUniformSampler::Sample(float random, const int range, const float log_range) const { // Got Log Uniform distribution from uniform distribution by // inverse_transform_sampling method const int64_t value = static_cast<int64_t>(exp(random * log_range)) - 1; // Mathematically, value should be <= range_, but might not be due to some // floating point roundoff, so we mod by range_. 
return value % range; } __device__ float GPULogUniformSampler::Probability( int64_t value, const float log_range) const { // Given f(x) = 1/[(x+1) * log_range_] // The value's probability is integral of f(x) from value to (value + 1) return (log((value + 2.0) / (value + 1.0))) / log_range; } template <typename T> __global__ void SamplingCondidate( const size_t n, const int num_tries, const int range, const float log_range, const int num_true, const std::size_t num_samples, const int64_t* label_data, int64_t* samples_data, T* probabilities_data) { const int num_sampled_classes = num_true + num_samples; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; GPULogUniformSampler sampler; for (; idx < n; idx += blockDim.x * gridDim.x) { int col_idx = idx % num_sampled_classes; int row_idx = idx / num_sampled_classes; if (col_idx < num_true) { samples_data[idx] = label_data[row_idx * num_true + col_idx]; } else { samples_data[idx] = samples_data[col_idx]; } probabilities_data[idx] = sampler.Probability(samples_data[idx], log_range); probabilities_data[idx] = gpu_adjust_prob(probabilities_data[idx], num_samples, num_tries); } } template <typename T> int UniqSampler(const Sampler& sampler, const std::size_t num_samples, int64_t* samples_data) { // sample num_samles unique samples for an example, note that they are not // all negative samples std::unordered_set<int64_t> tmp_samples; tmp_samples.clear(); int num_tries = 0; int j = 0; while (j < num_samples) { ++num_tries; auto v = sampler.Sample(); auto insert_ok = tmp_samples.insert(v).second; if (!insert_ok) { continue; } samples_data[j] = v; ++j; } return num_tries; } template <typename T> void GPUSampleWithProb<T>::operator()( const platform::CUDADeviceContext& context, const int seed, const int dict_size, const bool uniq, const std::size_t num_samples, const Tensor* L, Tensor* S, Tensor* P) { // UNDERSTAND: dimension issues const auto lbl_dim = L->dims(); const int batch_size = lbl_dim[0]; const int num_true = lbl_dim[1]; const int num_sampled_classes = num_true + num_samples; framework::DDim ret_dim{batch_size, num_sampled_classes}; // UNDERSTAND: raw data view const int64_t* label_data = L->data<int64_t>(); int64_t* samples_data = S->data<int64_t>(); T* probabilities_data = P->data<T>(); int s_size = num_samples; framework::DDim s_dim{s_size}; Tensor s; int64_t* s_data = s.mutable_data<int64_t>(s_dim, platform::CPUPlace()); math::LogUniformSampler sampler(dict_size, seed); int range = dict_size; float log_range = log(range + 1); int num_tries = UniqSampler<T>(sampler, num_samples, s_data); VLOG(1) << "num_tries: " << num_tries; #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(samples_data + num_true, s_data, sizeof(int64_t) * num_samples, hipMemcpyHostToDevice)); #else PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(samples_data + num_true, s_data, sizeof(int64_t) * num_samples, hipMemcpyHostToDevice)); #endif int threads = 512; const size_t size = batch_size * num_sampled_classes; int grid = (batch_size * num_sampled_classes + threads - 1) / threads; #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(HIP_KERNEL_NAME(SamplingCondidate<T>), dim3(grid), dim3(threads), 0, context.stream(), size, num_tries, range, log_range, num_true, num_samples, label_data, samples_data, probabilities_data); #else hipLaunchKernelGGL(( SamplingCondidate<T>), dim3(grid), dim3(threads), 0, context.stream(), size, num_tries, range, log_range, num_true, num_samples, label_data, samples_data, probabilities_data); #endif } template class GPUSampleWithProb<float>; 
template class GPUSampleWithProb<double>; } // namespace math } // namespace operators } // namespace paddle
85292a83ab130bb84230c29766479327a7e74aab.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <thrust/random.h> #include <thrust/sort.h> #include <iostream> #include <vector> #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sample_prob.h" #include "paddle/fluid/operators/math/sampler.h" namespace paddle { namespace operators { namespace math { using Tensor = framework::Tensor; template <typename T> __device__ T gpu_adjust_prob(const T prob, const int num_samples, const int num_tries) { if (num_samples == num_tries) { return prob * num_samples; } else { return -expm1(num_tries * log1p(-prob)); } } class GPULogUniformSampler { public: __device__ int64_t Sample(float random, const int range, const float log_range) const; __device__ float Probability(int64_t value, const float log_range) const; }; __device__ int64_t GPULogUniformSampler::Sample(float random, const int range, const float log_range) const { // Got Log Uniform distribution from uniform distribution by // inverse_transform_sampling method const int64_t value = static_cast<int64_t>(exp(random * log_range)) - 1; // Mathematically, value should be <= range_, but might not be due to some // floating point roundoff, so we mod by range_. 
return value % range; } __device__ float GPULogUniformSampler::Probability( int64_t value, const float log_range) const { // Given f(x) = 1/[(x+1) * log_range_] // The value's probability is integral of f(x) from value to (value + 1) return (log((value + 2.0) / (value + 1.0))) / log_range; } template <typename T> __global__ void SamplingCondidate( const size_t n, const int num_tries, const int range, const float log_range, const int num_true, const std::size_t num_samples, const int64_t* label_data, int64_t* samples_data, T* probabilities_data) { const int num_sampled_classes = num_true + num_samples; int idx = blockDim.x * blockIdx.x + threadIdx.x; int step_size = 0; GPULogUniformSampler sampler; for (; idx < n; idx += blockDim.x * gridDim.x) { int col_idx = idx % num_sampled_classes; int row_idx = idx / num_sampled_classes; if (col_idx < num_true) { samples_data[idx] = label_data[row_idx * num_true + col_idx]; } else { samples_data[idx] = samples_data[col_idx]; } probabilities_data[idx] = sampler.Probability(samples_data[idx], log_range); probabilities_data[idx] = gpu_adjust_prob(probabilities_data[idx], num_samples, num_tries); } } template <typename T> int UniqSampler(const Sampler& sampler, const std::size_t num_samples, int64_t* samples_data) { // sample num_samles unique samples for an example, note that they are not // all negative samples std::unordered_set<int64_t> tmp_samples; tmp_samples.clear(); int num_tries = 0; int j = 0; while (j < num_samples) { ++num_tries; auto v = sampler.Sample(); auto insert_ok = tmp_samples.insert(v).second; if (!insert_ok) { continue; } samples_data[j] = v; ++j; } return num_tries; } template <typename T> void GPUSampleWithProb<T>::operator()( const platform::CUDADeviceContext& context, const int seed, const int dict_size, const bool uniq, const std::size_t num_samples, const Tensor* L, Tensor* S, Tensor* P) { // UNDERSTAND: dimension issues const auto lbl_dim = L->dims(); const int batch_size = lbl_dim[0]; const int num_true = lbl_dim[1]; const int num_sampled_classes = num_true + num_samples; framework::DDim ret_dim{batch_size, num_sampled_classes}; // UNDERSTAND: raw data view const int64_t* label_data = L->data<int64_t>(); int64_t* samples_data = S->data<int64_t>(); T* probabilities_data = P->data<T>(); int s_size = num_samples; framework::DDim s_dim{s_size}; Tensor s; int64_t* s_data = s.mutable_data<int64_t>(s_dim, platform::CPUPlace()); math::LogUniformSampler sampler(dict_size, seed); int range = dict_size; float log_range = log(range + 1); int num_tries = UniqSampler<T>(sampler, num_samples, s_data); VLOG(1) << "num_tries: " << num_tries; #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(samples_data + num_true, s_data, sizeof(int64_t) * num_samples, hipMemcpyHostToDevice)); #else PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(samples_data + num_true, s_data, sizeof(int64_t) * num_samples, cudaMemcpyHostToDevice)); #endif int threads = 512; const size_t size = batch_size * num_sampled_classes; int grid = (batch_size * num_sampled_classes + threads - 1) / threads; #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(HIP_KERNEL_NAME(SamplingCondidate<T>), dim3(grid), dim3(threads), 0, context.stream(), size, num_tries, range, log_range, num_true, num_samples, label_data, samples_data, probabilities_data); #else SamplingCondidate<T><<<grid, threads, 0, context.stream()>>>( size, num_tries, range, log_range, num_true, num_samples, label_data, samples_data, probabilities_data); #endif } template class GPUSampleWithProb<float>; template class 
GPUSampleWithProb<double>; } // namespace math } // namespace operators } // namespace paddle
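GPULogUniformSampler above is plain inverse-transform sampling of the log-uniform distribution, and its Probability formula telescopes to exactly 1 over the range. A small host-side check with no Paddle dependencies (range stands in for dict_size):

#include <cmath>
#include <cstdio>
#include <random>

int main() {
    const int range = 1000; // stands in for dict_size
    const double log_range = std::log(range + 1.0);
    // P(value = k) = log((k + 2) / (k + 1)) / log(range + 1); the sum over k telescopes to 1.
    double psum = 0.0;
    for (int k = 0; k < range; ++k) { psum += std::log((k + 2.0) / (k + 1.0)) / log_range; }
    std::printf("sum of P(k) over the range = %.6f\n", psum); // prints 1.000000
    // Inverse-transform sampling, mirroring GPULogUniformSampler::Sample.
    std::mt19937 gen(1234);
    std::uniform_real_distribution<double> uni(0.0, 1.0);
    const long long value = (long long)std::exp(uni(gen) * log_range) - 1;
    std::printf("one sample: %lld\n", value % range); // mod guards against roundoff, as on device
    return 0;
}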
199a0171383dd43c1181677bd69712a6733de577.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include "linalg/eltwise.cuh" #include "random/rng.cuh" #include "stats/sum.cuh" #include "test_utils.h" namespace MLCommon { namespace Stats { template <typename T> struct SumInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const SumInputs<T> &dims) { return os; } template <typename T> class SumTest : public ::testing::TestWithParam<SumInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SumInputs<T>>::GetParam(); int rows = params.rows, cols = params.cols; int len = rows * cols; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); allocate(data, len); T data_h[len]; for (int i = 0; i < len; i++) { data_h[i] = T(1); } updateDevice(data, data_h, len, stream); allocate(sum_act, cols); sum(sum_act, data, cols, rows, false, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(sum_act)); } protected: SumInputs<T> params; T *data, *sum_act; }; const std::vector<SumInputs<float>> inputsf = {{0.05f, 1024, 32, 1234ULL}, {0.05f, 1024, 256, 1234ULL}}; const std::vector<SumInputs<double>> inputsd = {{0.05, 1024, 32, 1234ULL}, {0.05, 1024, 256, 1234ULL}}; typedef SumTest<float> SumTestF; TEST_P(SumTestF, Result) { ASSERT_TRUE(devArrMatch(float(params.rows), sum_act, params.cols, CompareApprox<float>(params.tolerance))); } typedef SumTest<double> SumTestD; TEST_P(SumTestD, Result) { ASSERT_TRUE(devArrMatch(double(params.rows), sum_act, params.cols, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SumTests, SumTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(SumTests, SumTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
199a0171383dd43c1181677bd69712a6733de577.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include "linalg/eltwise.cuh" #include "random/rng.cuh" #include "stats/sum.cuh" #include "test_utils.h" namespace MLCommon { namespace Stats { template <typename T> struct SumInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const SumInputs<T> &dims) { return os; } template <typename T> class SumTest : public ::testing::TestWithParam<SumInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SumInputs<T>>::GetParam(); int rows = params.rows, cols = params.cols; int len = rows * cols; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); allocate(data, len); T data_h[len]; for (int i = 0; i < len; i++) { data_h[i] = T(1); } updateDevice(data, data_h, len, stream); allocate(sum_act, cols); sum(sum_act, data, cols, rows, false, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(sum_act)); } protected: SumInputs<T> params; T *data, *sum_act; }; const std::vector<SumInputs<float>> inputsf = {{0.05f, 1024, 32, 1234ULL}, {0.05f, 1024, 256, 1234ULL}}; const std::vector<SumInputs<double>> inputsd = {{0.05, 1024, 32, 1234ULL}, {0.05, 1024, 256, 1234ULL}}; typedef SumTest<float> SumTestF; TEST_P(SumTestF, Result) { ASSERT_TRUE(devArrMatch(float(params.rows), sum_act, params.cols, CompareApprox<float>(params.tolerance))); } typedef SumTest<double> SumTestD; TEST_P(SumTestD, Result) { ASSERT_TRUE(devArrMatch(double(params.rows), sum_act, params.cols, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SumTests, SumTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(SumTests, SumTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
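The assertion in this test is easy to reproduce on the host: with every entry set to 1, each of the cols per-column sums is exactly rows. A reference loop follows (reading the false flag as a row-major switch is an assumption about sum()'s signature). Incidentally, T data_h[len] in SetUp is a variable-length array, a compiler extension in C++ that risks stack overflow at these sizes; a std::vector would be safer.

#include <cassert>
#include <vector>

int main() {
    const int rows = 1024, cols = 32;
    std::vector<float> data(rows * cols, 1.0f);
    for (int c = 0; c < cols; ++c) {
        float s = 0.0f;
        for (int r = 0; r < rows; ++r) { s += data[c * rows + r]; } // column-major walk
        assert(s == (float)rows); // the value devArrMatch compares against
    }
    return 0;
}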
647a0d0668727d4fb3e99f783c8708e8aac4e011.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/magma_zmcsrcompressor_gpu.cu normal z -> d, Tue Feb 9 16:05:45 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // count the nonzeros per row of the new structure (the copy itself happens in kernel3) __global__ void magma_dmcsrgpu_kernel1( int num_rows, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double zero = MAGMA_D_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correct row pointer! this is the nonzero count of this row! B_rowptr[ row ] = new_location-start; } } // generate a valid row pointer __global__ void magma_dmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_dmcsrgpu_kernel3( int num_rows, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; double zero = MAGMA_D_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param[in,out] A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in.
@ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dmcsrcompressor_gpu( magma_d_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_d_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are hipLaunchKernelGGL(( magma_dmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() , A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); hipLaunchKernelGGL(( magma_dmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue->cuda_stream() , A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_dmalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back hipLaunchKernelGGL(( magma_dmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() , A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_dmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_dmcsrcompressor_gpu( &dA, queue )); magma_dmfree( &dA, queue ); magma_dmfree( A, queue ); CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); } cleanup: magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
647a0d0668727d4fb3e99f783c8708e8aac4e011.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/magma_zmcsrcompressor_gpu.cu normal z -> d, Tue Feb 9 16:05:45 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // count the nonzeros per row of the new structure (the copy itself happens in kernel3) __global__ void magma_dmcsrgpu_kernel1( int num_rows, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double zero = MAGMA_D_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correct row pointer! this is the nonzero count of this row! B_rowptr[ row ] = new_location-start; } } // generate a valid row pointer __global__ void magma_dmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_dmcsrgpu_kernel3( int num_rows, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; double zero = MAGMA_D_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param[in,out] A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in.
    @ingroup magmasparse_daux
    ********************************************************************/

extern "C" magma_int_t
magma_dmcsrcompressor_gpu(
    magma_d_matrix *A,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magma_d_matrix B={Magma_CSR}, B2={Magma_CSR};
    magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR};
    magma_index_t *cputmp = NULL;

    if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
        CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 ));
        CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 ));

        magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue );

        dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) );

        // copy the nonzeros into B and record in B.drow how many there are per row
        magma_dmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>>
            ( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );

        // correct the row pointer
        dim3 grid2( 1, 1, 1 );
        magma_dmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue->cuda_stream() >>>
            ( A->num_rows, B.drow, A->drow );

        // read back the true number of nonzeros
        CHECK( magma_index_malloc_cpu( &cputmp, 1 ));
        magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue );
        A->nnz = (magma_int_t) cputmp[0];

        // reallocate with the right size
        CHECK( magma_dmalloc( &B.dval, A->nnz ));
        CHECK( magma_index_malloc( &B.dcol, A->nnz ));

        // copy the correct values back
        magma_dmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>>
            ( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );

        magma_free( A->dcol );
        magma_free( A->dval );

        A->dcol = B.dcol;
        A->dval = B.dval;
    }
    else {
        magma_storage_t A_storage = A->storage_type;
        magma_location_t A_location = A->memory_location;
        CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue ));
        CHECK( magma_dmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue ));

        CHECK( magma_dmcsrcompressor_gpu( &dA, queue ));

        magma_dmfree( &dA, queue );
        magma_dmfree( A, queue );
        CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue ));
        CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue ));

        magma_dmfree( &dA, queue );
        magma_dmfree( &CSRA, queue );
    }

cleanup:
    magma_dmfree( &dA, queue );
    magma_dmfree( &CSRA, queue );
    magma_free( B2.drow );
    magma_free( B.drow );
    return info;
}
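For orientation, here is a minimal host-side driver for the compressor pair above. This is a hypothetical sketch, not part of either file: it assumes the usual MAGMA-sparse entry points, magma_dm_5stencil as a test-matrix generator is an assumption, and error handling via CHECK is elided.

// Hypothetical driver sketch (assumed names; not part of the dataset file above).
#include "magmasparse_internal.h"

int main( void )
{
    magma_init();
    magma_queue_t queue = NULL;
    magma_queue_create( 0, &queue );

    magma_d_matrix A = {Magma_CSR}, dA = {Magma_CSR};
    magma_dm_5stencil( 64, &A, queue );                     // assumed test-matrix helper
    magma_dmtransfer( A, &dA, Magma_CPU, Magma_DEV, queue );

    magma_dmcsrcompressor_gpu( &dA, queue );                // drop explicit zeros in place

    magma_dmfree( &A, queue );
    magma_dmfree( &dA, queue );
    magma_queue_destroy( queue );
    magma_finalize();
    return 0;
}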
4ad986a013c1a2887ea049663abafa0c2754c668.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

//pass
//--gridDim=[20,67] --blockDim=[32,8]

#define blockSize_x 32
#define blockSize_y 8

// RAD is the radius of the region of support for the search
#define RAD 8
// STEPS is the number of loads we must perform to initialize the shared memory area
// (see convolution SDK sample for example)
#define STEPS 3

texture<unsigned int, hipTextureType2D, hipReadModeElementType> tex2Dleft;
texture<unsigned int, hipTextureType2D, hipReadModeElementType> tex2Dright;

__device__ static __attribute__((always_inline)) unsigned int __usad4(unsigned int A, unsigned int B, unsigned int C=0);
/* IMPERIAL EDIT: inline asm commented out
{
    unsigned int result;
#if (__CUDA_ARCH__ >= 300) // Kepler (SM 3.x) supports a 4 vector SAD SIMD
    asm("vabsdiff4.u32.u32.u32.add" " %0, %1, %2, %3;": "=r"(result):"r"(A), "r"(B), "r"(C));
#else // SM 2.0            // Fermi (SM 2.x) supports only 1 SAD SIMD, so there are 4 instructions
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b0, %2.b0, %3;": "=r"(result):"r"(A), "r"(B), "r"(C));
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b1, %2.b1, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b2, %2.b2, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b3, %2.b3, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
#endif
    return result;
}
*/

__global__ void stereoDisparityKernel(unsigned int *g_img0, unsigned int *g_img1,
                                      unsigned int *g_odata,
                                      int w, int h,
                                      int minDisparity, int maxDisparity)
{
    __requires(w == 640);
    // access thread id
    const int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    const int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    const unsigned int sidx = threadIdx.x+RAD;
    const unsigned int sidy = threadIdx.y+RAD;

    unsigned int imLeft;
    unsigned int imRight;
    unsigned int cost;
    unsigned int bestCost = 9999999;
    unsigned int bestDisparity = 0;
    __shared__ unsigned int diff[blockSize_y+2*RAD][blockSize_x+2*RAD];

    // store needed values for left image into registers (constant indexed local vars)
    unsigned int imLeftA[STEPS];
    unsigned int imLeftB[STEPS];

    for (int i=0; i<STEPS; i++)
    {
        int offset = -RAD + i*RAD;
        imLeftA[i] = tex2D(tex2Dleft, tidx-RAD, tidy+offset);
        imLeftB[i] = tex2D(tex2Dleft, tidx-RAD+blockSize_x, tidy+offset);
    }

    // for a fixed camera system this could be hardcoded and loop unrolled
    for (int d=minDisparity; d<=maxDisparity; d++)
    {
        //LEFT
#pragma unroll
        for (int i=0;
             __global_invariant(__write_implies(diff, __write_offset_bytes(diff)/sizeof(unsigned int)%(blockSize_x + 2 * RAD) == sidx - RAD)),
             __global_invariant(__write_implies(diff, (__write_offset_bytes(diff)/sizeof(unsigned int)/(blockSize_x + 2 * RAD) - sidy + RAD)%RAD == 0)),
             i<STEPS; i++)
        {
            int offset = -RAD + i*RAD;
            //imLeft = tex2D( tex2Dleft, tidx-RAD, tidy+offset );
            imLeft = imLeftA[i];
            imRight = tex2D(tex2Dright, tidx-RAD+d, tidy+offset);
            cost = __usad4(imLeft, imRight);
            diff[sidy+offset][sidx-RAD] = cost;
        }

        //RIGHT
#pragma unroll
        for (int i=0;
             __global_invariant(__write_implies(diff, (__write_offset_bytes(diff)/sizeof(unsigned int)%(blockSize_x + 2 * RAD) == sidx - RAD + blockSize_x) | (__write_offset_bytes(diff)/sizeof(unsigned int)%(blockSize_x + 2 * RAD) == sidx - RAD))),
             __global_invariant(__write_implies(diff, (__write_offset_bytes(diff)/sizeof(unsigned int)/(blockSize_x + 2 * RAD) - sidy + RAD)%RAD == 0)),
             i<STEPS; i++)
        {
            int offset = -RAD + i*RAD;

            if (threadIdx.x < 2*RAD)
            {
                //imLeft = tex2D( tex2Dleft, tidx-RAD+blockSize_x, tidy+offset );
                imLeft = imLeftB[i];
                imRight = tex2D(tex2Dright, tidx-RAD+blockSize_x+d, tidy+offset);
                cost = __usad4(imLeft, imRight);
                diff[sidy+offset][sidx-RAD+blockSize_x] = cost;
            }
        }

        __syncthreads();

        // sum cost horizontally
#pragma unroll
        for (int j=0; j<STEPS; j++)
        {
            int offset = -RAD + j*RAD;
            cost = 0;
#pragma unroll
            for (int i=-RAD; i<=RAD ; i++)
            {
                cost += diff[sidy+offset][sidx+i];
            }

            __syncthreads();
            diff[sidy+offset][sidx] = cost;
            __syncthreads();
        }

        // sum cost vertically
        cost = 0;
#pragma unroll
        for (int i=-RAD; i<=RAD ; i++)
        {
            cost += diff[sidy+i][sidx];
        }

        // see if it is better or not
        if (cost < bestCost)
        {
            bestCost = cost;
            bestDisparity = d+8;
        }

        __syncthreads();
    }

    if (tidy < h && tidx < w)
    {
        g_odata[tidy*w + tidx] = bestDisparity;
    }
}
4ad986a013c1a2887ea049663abafa0c2754c668.cu
//pass
//--gridDim=[20,67] --blockDim=[32,8]

#define blockSize_x 32
#define blockSize_y 8

// RAD is the radius of the region of support for the search
#define RAD 8
// STEPS is the number of loads we must perform to initialize the shared memory area
// (see convolution SDK sample for example)
#define STEPS 3

texture<unsigned int, cudaTextureType2D, cudaReadModeElementType> tex2Dleft;
texture<unsigned int, cudaTextureType2D, cudaReadModeElementType> tex2Dright;

__device__ static __attribute__((always_inline)) unsigned int __usad4(unsigned int A, unsigned int B, unsigned int C=0);
/* IMPERIAL EDIT: inline asm commented out
{
    unsigned int result;
#if (__CUDA_ARCH__ >= 300) // Kepler (SM 3.x) supports a 4 vector SAD SIMD
    asm("vabsdiff4.u32.u32.u32.add" " %0, %1, %2, %3;": "=r"(result):"r"(A), "r"(B), "r"(C));
#else // SM 2.0            // Fermi (SM 2.x) supports only 1 SAD SIMD, so there are 4 instructions
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b0, %2.b0, %3;": "=r"(result):"r"(A), "r"(B), "r"(C));
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b1, %2.b1, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b2, %2.b2, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
    asm("vabsdiff.u32.u32.u32.add" " %0, %1.b3, %2.b3, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
#endif
    return result;
}
*/

__global__ void stereoDisparityKernel(unsigned int *g_img0, unsigned int *g_img1,
                                      unsigned int *g_odata,
                                      int w, int h,
                                      int minDisparity, int maxDisparity)
{
    __requires(w == 640);
    // access thread id
    const int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    const int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    const unsigned int sidx = threadIdx.x+RAD;
    const unsigned int sidy = threadIdx.y+RAD;

    unsigned int imLeft;
    unsigned int imRight;
    unsigned int cost;
    unsigned int bestCost = 9999999;
    unsigned int bestDisparity = 0;
    __shared__ unsigned int diff[blockSize_y+2*RAD][blockSize_x+2*RAD];

    // store needed values for left image into registers (constant indexed local vars)
    unsigned int imLeftA[STEPS];
    unsigned int imLeftB[STEPS];

    for (int i=0; i<STEPS; i++)
    {
        int offset = -RAD + i*RAD;
        imLeftA[i] = tex2D(tex2Dleft, tidx-RAD, tidy+offset);
        imLeftB[i] = tex2D(tex2Dleft, tidx-RAD+blockSize_x, tidy+offset);
    }

    // for a fixed camera system this could be hardcoded and loop unrolled
    for (int d=minDisparity; d<=maxDisparity; d++)
    {
        //LEFT
#pragma unroll
        for (int i=0;
             __global_invariant(__write_implies(diff, __write_offset_bytes(diff)/sizeof(unsigned int)%(blockSize_x + 2 * RAD) == sidx - RAD)),
             __global_invariant(__write_implies(diff, (__write_offset_bytes(diff)/sizeof(unsigned int)/(blockSize_x + 2 * RAD) - sidy + RAD)%RAD == 0)),
             i<STEPS; i++)
        {
            int offset = -RAD + i*RAD;
            //imLeft = tex2D( tex2Dleft, tidx-RAD, tidy+offset );
            imLeft = imLeftA[i];
            imRight = tex2D(tex2Dright, tidx-RAD+d, tidy+offset);
            cost = __usad4(imLeft, imRight);
            diff[sidy+offset][sidx-RAD] = cost;
        }

        //RIGHT
#pragma unroll
        for (int i=0;
             __global_invariant(__write_implies(diff, (__write_offset_bytes(diff)/sizeof(unsigned int)%(blockSize_x + 2 * RAD) == sidx - RAD + blockSize_x) | (__write_offset_bytes(diff)/sizeof(unsigned int)%(blockSize_x + 2 * RAD) == sidx - RAD))),
             __global_invariant(__write_implies(diff, (__write_offset_bytes(diff)/sizeof(unsigned int)/(blockSize_x + 2 * RAD) - sidy + RAD)%RAD == 0)),
             i<STEPS; i++)
        {
            int offset = -RAD + i*RAD;

            if (threadIdx.x < 2*RAD)
            {
                //imLeft = tex2D( tex2Dleft, tidx-RAD+blockSize_x, tidy+offset );
                imLeft = imLeftB[i];
                imRight = tex2D(tex2Dright, tidx-RAD+blockSize_x+d, tidy+offset);
                cost = __usad4(imLeft, imRight);
                diff[sidy+offset][sidx-RAD+blockSize_x] = cost;
            }
        }

        __syncthreads();

        // sum cost horizontally
#pragma unroll
        for (int j=0; j<STEPS; j++)
        {
            int offset = -RAD + j*RAD;
            cost = 0;
#pragma unroll
            for (int i=-RAD; i<=RAD ; i++)
            {
                cost += diff[sidy+offset][sidx+i];
            }

            __syncthreads();
            diff[sidy+offset][sidx] = cost;
            __syncthreads();
        }

        // sum cost vertically
        cost = 0;
#pragma unroll
        for (int i=-RAD; i<=RAD ; i++)
        {
            cost += diff[sidy+i][sidx];
        }

        // see if it is better or not
        if (cost < bestCost)
        {
            bestCost = cost;
            bestDisparity = d+8;
        }

        __syncthreads();
    }

    if (tidy < h && tidx < w)
    {
        g_odata[tidy*w + tidx] = bestDisparity;
    }
}
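A host-side launch sketch for the kernel above, as a hypothetical example: w = 640 comes from the __requires precondition, and h = 533 is an assumption chosen so the grid matches the //--gridDim=[20,67] annotation (ceil(640/32) = 20, ceil(533/8) = 67). The disparity range, wrapper name, and buffer names are also assumptions, and the two textures are assumed to have been bound to the input images beforehand.

// Hypothetical launch wrapper (not part of the file above); error checks elided.
void runStereoDisparity(unsigned int *d_img0, unsigned int *d_img1,
                        unsigned int *d_odata)
{
    const int w = 640, h = 533;            // w fixed by __requires; h assumed
    const int minDisp = -16, maxDisp = 0;  // assumed search range

    dim3 block(blockSize_x, blockSize_y);             // 32 x 8
    dim3 grid((w + block.x - 1) / block.x,            // 20
              (h + block.y - 1) / block.y);           // 67

    stereoDisparityKernel<<<grid, block>>>(d_img0, d_img1, d_odata,
                                           w, h, minDisp, maxDisp);
    cudaDeviceSynchronize();
}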
cc9c605f0a1bfca1ad659f1447548ca734ebeb4b.hip
// !!! This is a file automatically generated by hipify!!!
/* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/

/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

/* Includes, cuda */
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <helper_cuda.h>

#define BLOCK_SIZE 16

/* ======================================================== */
/* CUDA implementation of dGEMM without using shared memory */
/* ======================================================== */
__global__ void cuda_dgemm(int n, double alpha, const double *A, const double *B,
                           double beta, double *C)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    //printf("row = %d col = %d n= %d\n", row, col, n);
    if (row >= n || col >= n) return;

    double prod = 0;
    for (int k = 0; k < n; ++k){
        prod += B[row * n + k] * A[k * n + col];
        //printf(" %d %d %d %f %f\n",k,row,col,A[row*n+k],B[k*n+col]);
    }
    //printf("prod = %f\n", prod);
    C[row*n + col] = alpha * prod + beta * C[row*n+col];
}

/* ===================================================== */
/* CUDA implementation of dGEMM using shared memory      */
/* ===================================================== */
__global__ void cuda_dgemm_shmem(int n, double alpha, const double *B, const double *A,
                                 double beta, double *C)
{
    // Block index
    int block_col = blockIdx.x;
    int block_row = blockIdx.y;

    // Thread index
    int thread_col = threadIdx.x;
    int thread_row = threadIdx.y;

    //printf("row = %d col = %d n= %d\n", block_col, block_row, n);
    //int row = blockDim.y * blockIdx.y + threadIdx.y;
    //int col = blockDim.x * blockIdx.x + threadIdx.x;

    int aBegin = n * blockDim.x * block_row;
    int aEnd   = aBegin + n-1;
    int bBegin = blockDim.x * block_col;
    int bStep  = n * blockDim.x;

    double Csub = 0;

    for (int a=aBegin, b=bBegin, istep=0; a <= aEnd; a+= blockDim.x, b+=bStep, ++istep){

        __shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];

        if ((istep*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
            As[thread_row][thread_col] = A[a + n * thread_row + thread_col];
        else
            As[thread_row][thread_col] = 0;

        if ((block_col*blockDim.x+thread_col < n) && (istep*blockDim.x + thread_row < n))
            Bs[thread_row][thread_col] = B[b + n * thread_row + thread_col];
        else
            Bs[thread_row][thread_col] = 0;

        __syncthreads();

        // calculate the cell
        for (int k = 0; k < blockDim.x; ++k)
            Csub += As[thread_row][k] * Bs[k][thread_col];

        __syncthreads();
    }

    // Write the block sub-matrix to global memory;
    // each thread writes one element
    int c = n * blockDim.x * block_row + blockDim.x * block_col;
    if ((block_col*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
        C[c + n * thread_row + thread_col] = alpha * Csub + beta * C[c + n * thread_row + thread_col];
}

/* ======================================== */
/* Simple host implementation of dgemm      */
/* ======================================== */
static void simple_dgemm(int n, double alpha, const double *A, const double *B,
                         double beta, double *C)
{
    int i, j, k;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j){
            double prod = 0;
            for (k = 0; k < n; ++k){
                prod += A[k * n + i] * B[j * n + k];
            }
            C[j * n + i] = alpha * prod + beta * C[j * n + i];
        }
    }
}

/* ======================= */
/* dgemm from BLAS library */
/* ======================= */
extern "C"{
    extern void dgemm_(char *, char * , int *, int *, int *, double *, double *,
                       int *, double *, int *, double *, double *, int *);
};

/* ==== */
/* Main */
/* ==== */
int main(int argc, char **argv)
{
    hipblasStatus_t status;
    double *h_A, *h_B, *h_C, *h_C_blas, *h_C_simple, *h_C_0;
    double *d_A = 0;
    double *d_B = 0;
    double *d_C = 0;
    double alpha = 1.0f;
    double beta = 0.0f;
    int n2, N;
    int i;
    double error_norm1, error_norm2;
    double ref_norm;
    double diff1, diff2;
    hipblasHandle_t handle;
    struct timeval tv1, tv2;

    /* get the size of the matrix from the command line */
    if (argc <2 ) N= 275;
    else N = atoi(argv[1]);
    //N=3;
    n2 = N * N;

    printf("\nRunning dgemm test for %d by %d matrices.\n", N, N);

    /* Initialize CUBLAS */
    status = hipblasCreate(&handle);

    /* Allocate host memory for the matrices */
    h_A = (double *)malloc(n2 * sizeof(double) );
    h_B = (double *)malloc(n2 * sizeof(double) );
    h_C = (double *)malloc(n2 * sizeof(double) );
    h_C_blas = (double *)malloc(n2 * sizeof(double) );
    h_C_simple = (double *)malloc(n2 * sizeof(double) );
    h_C_0 = (double *)malloc(n2 * sizeof(double) );

    /* Fill the matrices with test data */
    for (i = 0; i < n2; i++){
        h_A[i] = rand() / (double)RAND_MAX;
        h_B[i] = rand() / (double)RAND_MAX;
        h_C[i] = rand() / (double)RAND_MAX;
        h_C_blas[i] = h_C[i];
        h_C_simple[i] = h_C[i];
        h_C_0[i] = h_C[i];
        //printf("%f %f \n",h_A[i], h_B[i]);
    }

    printf("\tTesting dgemm function from cuBLAS library.\n");
    gettimeofday(&tv1, NULL);

    /* Allocate device memory for the matrices */
    hipMalloc((void **)&d_A, n2 * sizeof(d_A[0]));
    hipMalloc((void **)&d_B, n2 * sizeof(d_B[0]));
    hipMalloc((void **)&d_C, n2 * sizeof(d_C[0]));

    /* Initialize the device matrices with the host matrices */
    status = hipblasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
    status = hipblasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
    status = hipblasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);

    /* Performs operation using cublas */
    status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N,
                          &alpha, d_A, N, d_B, N, &beta, d_C, N);

    /* Read the result back */
    status = hipblasGetVector(n2, sizeof(h_C[0]), d_C, 1, h_C, 1);

    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec-tv1.tv_usec)/1000 +
           (double)(tv2.tv_sec -tv1.tv_sec )*1000);

    /* free cuda memory */
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    /* ============ CUDA implementation without shared memory =============== */
    printf("\tTesting CUDA dgemm function without using Shared memory.\n");
    gettimeofday(&tv1, NULL);

    /* Allocate device memory for the matrices */
    hipMalloc((void **)&d_A, n2 * sizeof(d_A[0]));
    hipMalloc((void **)&d_B, n2 * sizeof(d_B[0]));
    hipMalloc((void **)&d_C, n2 * sizeof(d_C[0]));

    /* copy A and B matrices to gpu */
    hipMemcpy(d_A, h_A,n2*sizeof(d_A[0]), hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B,n2*sizeof(d_B[0]), hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C_0,n2*sizeof(d_C[0]), hipMemcpyHostToDevice);

    /* Kernel */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(N/BLOCK_SIZE+1, N/BLOCK_SIZE+1);
    //printf(" beta=%f\n",beta);
    hipLaunchKernelGGL(( cuda_dgemm), dim3(dimGrid), dim3(dimBlock), 0, 0, N, alpha, d_A, d_B, beta, d_C);

    /* wait until all threads finish their job */
    hipDeviceSynchronize();

    /* Read the result back */
    hipMemcpy(h_C, d_C,n2*sizeof(d_C[0]), hipMemcpyDeviceToHost);

    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec-tv1.tv_usec)/1000 +
           (double)(tv2.tv_sec -tv1.tv_sec )*1000);

    /* free cuda memory */
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    /* ============ CUDA implementation using shared memory =============== */
    printf("\tTesting CUDA dgemm function using Shared memory.\n");
    gettimeofday(&tv1, NULL);

    /* Allocate device memory for the matrices */
    hipMalloc((void **)&d_A, n2 * sizeof(d_A[0]));
    hipMalloc((void **)&d_B, n2 * sizeof(d_B[0]));
    hipMalloc((void **)&d_C, n2 * sizeof(d_C[0]));

    /* copy A and B matrices to gpu */
    hipMemcpy(d_A, h_A,n2*sizeof(d_A[0]), hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B,n2*sizeof(d_B[0]), hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C_0,n2*sizeof(d_C[0]), hipMemcpyHostToDevice);

    /* Kernel */
    hipLaunchKernelGGL(( cuda_dgemm_shmem), dim3(dimGrid), dim3(dimBlock), 0, 0, N, alpha, d_A, d_B, beta, d_C);

    /* wait until all threads finish their job */
    hipDeviceSynchronize();

    /* Read the result back */
    hipMemcpy(h_C, d_C,n2*sizeof(d_C[0]), hipMemcpyDeviceToHost);

    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec-tv1.tv_usec)/1000 +
           (double)(tv2.tv_sec -tv1.tv_sec )*1000);

    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    /* Memory clean up */
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_simple);
    free(h_C_blas);

    /* Shutdown */
    status = hipblasDestroy(handle);

    return(0);
}
cc9c605f0a1bfca1ad659f1447548ca734ebeb4b.cu
/* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/

/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <helper_cuda.h>

#define BLOCK_SIZE 16

/* ======================================================== */
/* CUDA implementation of dGEMM without using shared memory */
/* ======================================================== */
__global__ void cuda_dgemm(int n, double alpha, const double *A, const double *B,
                           double beta, double *C)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    //printf("row = %d col = %d n= %d\n", row, col, n);
    if (row >= n || col >= n) return;

    double prod = 0;
    for (int k = 0; k < n; ++k){
        prod += B[row * n + k] * A[k * n + col];
        //printf(" %d %d %d %f %f\n",k,row,col,A[row*n+k],B[k*n+col]);
    }
    //printf("prod = %f\n", prod);
    C[row*n + col] = alpha * prod + beta * C[row*n+col];
}

/* ===================================================== */
/* CUDA implementation of dGEMM using shared memory      */
/* ===================================================== */
__global__ void cuda_dgemm_shmem(int n, double alpha, const double *B, const double *A,
                                 double beta, double *C)
{
    // Block index
    int block_col = blockIdx.x;
    int block_row = blockIdx.y;

    // Thread index
    int thread_col = threadIdx.x;
    int thread_row = threadIdx.y;

    //printf("row = %d col = %d n= %d\n", block_col, block_row, n);
    //int row = blockDim.y * blockIdx.y + threadIdx.y;
    //int col = blockDim.x * blockIdx.x + threadIdx.x;

    int aBegin = n * blockDim.x * block_row;
    int aEnd   = aBegin + n-1;
    int bBegin = blockDim.x * block_col;
    int bStep  = n * blockDim.x;

    double Csub = 0;

    for (int a=aBegin, b=bBegin, istep=0; a <= aEnd; a+= blockDim.x, b+=bStep, ++istep){

        __shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];

        if ((istep*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
            As[thread_row][thread_col] = A[a + n * thread_row + thread_col];
        else
            As[thread_row][thread_col] = 0;

        if ((block_col*blockDim.x+thread_col < n) && (istep*blockDim.x + thread_row < n))
            Bs[thread_row][thread_col] = B[b + n * thread_row + thread_col];
        else
            Bs[thread_row][thread_col] = 0;

        __syncthreads();

        // calculate the cell
        for (int k = 0; k < blockDim.x; ++k)
            Csub += As[thread_row][k] * Bs[k][thread_col];

        __syncthreads();
    }

    // Write the block sub-matrix to global memory;
    // each thread writes one element
    int c = n * blockDim.x * block_row + blockDim.x * block_col;
    if ((block_col*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
        C[c + n * thread_row + thread_col] = alpha * Csub + beta * C[c + n * thread_row + thread_col];
}

/* ======================================== */
/* Simple host implementation of dgemm      */
/* ======================================== */
static void simple_dgemm(int n, double alpha, const double *A, const double *B,
                         double beta, double *C)
{
    int i, j, k;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j){
            double prod = 0;
            for (k = 0; k < n; ++k){
                prod += A[k * n + i] * B[j * n + k];
            }
            C[j * n + i] = alpha * prod + beta * C[j * n + i];
        }
    }
}

/* ======================= */
/* dgemm from BLAS library */
/* ======================= */
extern "C"{
    extern void dgemm_(char *, char * , int *, int *, int *, double *, double *,
                       int *, double *, int *, double *, double *, int *);
};

/* ==== */
/* Main */
/* ==== */
int main(int argc, char **argv)
{
    cublasStatus_t status;
    double *h_A, *h_B, *h_C, *h_C_blas, *h_C_simple, *h_C_0;
    double *d_A = 0;
    double *d_B = 0;
    double *d_C = 0;
    double alpha = 1.0f;
    double beta = 0.0f;
    int n2, N;
    int i;
    double error_norm1, error_norm2;
    double ref_norm;
    double diff1, diff2;
    cublasHandle_t handle;
    struct timeval tv1, tv2;

    /* get the size of the matrix from the command line */
    if (argc <2 ) N= 275;
    else N = atoi(argv[1]);
    //N=3;
    n2 = N * N;

    printf("\nRunning dgemm test for %d by %d matrices.\n", N, N);

    /* Initialize CUBLAS */
    status = cublasCreate(&handle);

    /* Allocate host memory for the matrices */
    h_A = (double *)malloc(n2 * sizeof(double) );
    h_B = (double *)malloc(n2 * sizeof(double) );
    h_C = (double *)malloc(n2 * sizeof(double) );
    h_C_blas = (double *)malloc(n2 * sizeof(double) );
    h_C_simple = (double *)malloc(n2 * sizeof(double) );
    h_C_0 = (double *)malloc(n2 * sizeof(double) );

    /* Fill the matrices with test data */
    for (i = 0; i < n2; i++){
        h_A[i] = rand() / (double)RAND_MAX;
        h_B[i] = rand() / (double)RAND_MAX;
        h_C[i] = rand() / (double)RAND_MAX;
        h_C_blas[i] = h_C[i];
        h_C_simple[i] = h_C[i];
        h_C_0[i] = h_C[i];
        //printf("%f %f \n",h_A[i], h_B[i]);
    }

    printf("\tTesting dgemm function from cuBLAS library.\n");
    gettimeofday(&tv1, NULL);

    /* Allocate device memory for the matrices */
    cudaMalloc((void **)&d_A, n2 * sizeof(d_A[0]));
    cudaMalloc((void **)&d_B, n2 * sizeof(d_B[0]));
    cudaMalloc((void **)&d_C, n2 * sizeof(d_C[0]));

    /* Initialize the device matrices with the host matrices */
    status = cublasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
    status = cublasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
    status = cublasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);

    /* Performs operation using cublas */
    status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N,
                         &alpha, d_A, N, d_B, N, &beta, d_C, N);

    /* Read the result back */
    status = cublasGetVector(n2, sizeof(h_C[0]), d_C, 1, h_C, 1);

    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec-tv1.tv_usec)/1000 +
           (double)(tv2.tv_sec -tv1.tv_sec )*1000);

    /* free cuda memory */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    /* ============ CUDA implementation without shared memory =============== */
    printf("\tTesting CUDA dgemm function without using Shared memory.\n");
    gettimeofday(&tv1, NULL);

    /* Allocate device memory for the matrices */
    cudaMalloc((void **)&d_A, n2 * sizeof(d_A[0]));
    cudaMalloc((void **)&d_B, n2 * sizeof(d_B[0]));
    cudaMalloc((void **)&d_C, n2 * sizeof(d_C[0]));

    /* copy A and B matrices to gpu */
    cudaMemcpy(d_A, h_A,n2*sizeof(d_A[0]), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B,n2*sizeof(d_B[0]), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C_0,n2*sizeof(d_C[0]), cudaMemcpyHostToDevice);

    /* Kernel */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(N/BLOCK_SIZE+1, N/BLOCK_SIZE+1);
    //printf(" beta=%f\n",beta);
    cuda_dgemm<<<dimGrid, dimBlock>>>(N, alpha, d_A, d_B, beta, d_C);

    /* wait until all threads finish their job */
    cudaDeviceSynchronize();

    /* Read the result back */
    cudaMemcpy(h_C, d_C,n2*sizeof(d_C[0]), cudaMemcpyDeviceToHost);

    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec-tv1.tv_usec)/1000 +
           (double)(tv2.tv_sec -tv1.tv_sec )*1000);

    /* free cuda memory */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    /* ============ CUDA implementation using shared memory =============== */
    printf("\tTesting CUDA dgemm function using Shared memory.\n");
    gettimeofday(&tv1, NULL);

    /* Allocate device memory for the matrices */
    cudaMalloc((void **)&d_A, n2 * sizeof(d_A[0]));
    cudaMalloc((void **)&d_B, n2 * sizeof(d_B[0]));
    cudaMalloc((void **)&d_C, n2 * sizeof(d_C[0]));

    /* copy A and B matrices to gpu */
    cudaMemcpy(d_A, h_A,n2*sizeof(d_A[0]), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B,n2*sizeof(d_B[0]), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C_0,n2*sizeof(d_C[0]), cudaMemcpyHostToDevice);

    /* Kernel */
    cuda_dgemm_shmem<<<dimGrid, dimBlock>>>(N, alpha, d_A, d_B, beta, d_C);

    /* wait until all threads finish their job */
    cudaDeviceSynchronize();

    /* Read the result back */
    cudaMemcpy(h_C, d_C,n2*sizeof(d_C[0]), cudaMemcpyDeviceToHost);

    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec-tv1.tv_usec)/1000 +
           (double)(tv2.tv_sec -tv1.tv_sec )*1000);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    /* Memory clean up */
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_simple);
    free(h_C_blas);

    /* Shutdown */
    status = cublasDestroy(handle);

    return(0);
}
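The file above defines simple_dgemm but never calls it, even though it allocates h_C_simple. A natural correctness check, shown here as a hypothetical fragment (not in the original; assumes <math.h> is included and is meant to run after a device-to-host copy of the GPU result):

/* Hypothetical verification step (assumed, not in the original file):
   run the CPU reference into h_C_simple and compare element-wise
   against the GPU result in h_C. */
simple_dgemm(N, alpha, h_A, h_B, beta, h_C_simple);

double max_abs_diff = 0.0;
for (i = 0; i < n2; i++) {
    double d = fabs(h_C[i] - h_C_simple[i]);
    if (d > max_abs_diff) max_abs_diff = d;
}
printf("\t\tmax |GPU - CPU| = %e\n", max_abs_diff);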
6114d183bf5bd7b565413b434ea7c773505c1339.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include <time.h>
#include <sys/time.h>

void matprint(float *M, int row, int col){
    printf("\nMatrix %d by %d \n", row, col);
    for (int i=0; i<row; i++){
        for (int j=0; j<col; j++){
            printf("%0.f ", M[i*col+j]);
        }
        printf("\n");
    }
    printf("\n");
    for (int i=0; i<col*row; i++){
        printf("%0.f ", M[i]);
    }
    printf("\n");
    printf("\n");
}

void matfill(float *M, int row, int col){
    int ind = 1;
    for (int j=0; j<col; j++){
        for (int i=0; i<row; i++){
            M[j*row+i] = ind++; //rand()%30;
        }
    }
}

double cpuTimer(){
    struct timeval clock;
    gettimeofday(&clock, NULL);
    return ( (double)clock.tv_sec + (double)clock.tv_usec * 1.e-6 );
}

int main ( void ){
    hipblasHandle_t handle;          // CUBLAS context
    hipError_t err = hipSuccess;
    srand(time(NULL));
    double ti;
    double duration;

    int m=3200;   // a - mxk
    int n=1600;   // b - kxn
    int k=2400;   // P_G and P_C - mxn

    // create matrices and allocate data
    float *a;     // mxk
    float *b;     // kxn
    float *P_G;   // mxn

    err = hipMallocManaged(&a, m*k*sizeof(float));
    if (err != hipSuccess){
        fprintf(stderr, "Failed to allocate device matrix M (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipMallocManaged(&b, k*n*sizeof(float));
    if (err != hipSuccess){
        fprintf(stderr, "Failed to allocate device matrix N (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipMallocManaged(&P_G, m*n*sizeof(float));
    if (err != hipSuccess){
        fprintf(stderr, "Failed to allocate device matrix P_G (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // fill with data
    matfill(a, m, k);
    matfill(b, k, n);

    hipblasCreate(&handle);          // initialize CUBLAS context

    float al  = 1;
    float bet = 0;

    // matrix multiplication: c = al*a*b + bet*c
    ti = cpuTimer();
    hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &al, a, m, b, k, &bet, P_G, m);
    hipDeviceSynchronize();
    duration = cpuTimer() - ti;
    printf("\nCublas - Matrix Multiplication: Done,\nDuration: %.6f s", duration);

    hipFree(a);                      // free memory
    hipFree(b);                      // free memory
    hipFree(P_G);                    // free memory
    hipblasDestroy(handle);          // destroy CUBLAS context
    return 0;
}
6114d183bf5bd7b565413b434ea7c773505c1339.cu
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <time.h>
#include <sys/time.h>

void matprint(float *M, int row, int col){
    printf("\nMatrix %d by %d \n", row, col);
    for (int i=0; i<row; i++){
        for (int j=0; j<col; j++){
            printf("%0.f ", M[i*col+j]);
        }
        printf("\n");
    }
    printf("\n");
    for (int i=0; i<col*row; i++){
        printf("%0.f ", M[i]);
    }
    printf("\n");
    printf("\n");
}

void matfill(float *M, int row, int col){
    int ind = 1;
    for (int j=0; j<col; j++){
        for (int i=0; i<row; i++){
            M[j*row+i] = ind++; //rand()%30;
        }
    }
}

double cpuTimer(){
    struct timeval clock;
    gettimeofday(&clock, NULL);
    return ( (double)clock.tv_sec + (double)clock.tv_usec * 1.e-6 );
}

int main ( void ){
    cublasHandle_t handle;           // CUBLAS context
    cudaError_t err = cudaSuccess;
    srand(time(NULL));
    double ti;
    double duration;

    int m=3200;   // a - mxk
    int n=1600;   // b - kxn
    int k=2400;   // P_G and P_C - mxn

    // create matrices and allocate data
    float *a;     // mxk
    float *b;     // kxn
    float *P_G;   // mxn

    err = cudaMallocManaged(&a, m*k*sizeof(float));
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to allocate device matrix M (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMallocManaged(&b, k*n*sizeof(float));
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to allocate device matrix N (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMallocManaged(&P_G, m*n*sizeof(float));
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to allocate device matrix P_G (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // fill with data
    matfill(a, m, k);
    matfill(b, k, n);

    cublasCreate(&handle);           // initialize CUBLAS context

    float al  = 1;
    float bet = 0;

    // matrix multiplication: c = al*a*b + bet*c
    ti = cpuTimer();
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &al, a, m, b, k, &bet, P_G, m);
    cudaDeviceSynchronize();
    duration = cpuTimer() - ti;
    printf("\nCublas - Matrix Multiplication: Done,\nDuration: %.6f s", duration);

    cudaFree(a);                     // free memory
    cudaFree(b);                     // free memory
    cudaFree(P_G);                   // free memory
    cublasDestroy(handle);           // destroy CUBLAS context
    return 0;
}
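One convention worth making explicit for this last pair: cuBLAS and hipBLAS treat matrices as column-major, which is why matfill writes M[j*row+i] and why the leading dimensions in the Sgemm call are m, k, m. A hypothetical sanity check, as a fragment meant to sit before the frees (assumed code, not in the original; it recomputes only element (0,0)):

/* Hypothetical check (assumed): recompute P_G(0,0) on the CPU using the
   same column-major layout: a is m-by-k with lda=m, b is k-by-n with ldb=k. */
float ref = 0.0f;
for (int kk = 0; kk < k; kk++)
    ref += a[kk*m + 0] * b[0*k + kk];      /* a(0,kk) * b(kk,0) */
printf("\nP_G(0,0) = %f, CPU ref = %f\n", P_G[0], ref);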